[PATCH 5/5] aarch64: Enable building the aarch64 cipher assembly for windows

Martin Storsjö martin at martin.st
Thu Mar 22 22:32:40 CET 2018


* cipher/asm-common-aarch64.h: New.
* cipher/camellia-aarch64.S: Use ELF macro, use x19 instead of x18.
* cipher/chacha20-aarch64.S: Use ELF macro, don't use GOT on windows.
* cipher/cipher-gcm-armv8-aarch64-ce.S: Use ELF macro.
* cipher/rijndael-aarch64.S: Use ELF macro.
* cipher/rijndael-armv8-aarch64-ce.S: Use ELF macro.
* cipher/sha1-armv8-aarch64-ce.S: Use ELF macro.
* cipher/sha256-armv8-aarch64-ce.S: Use ELF macro.
* cipher/twofish-aarch64.S: Use ELF macro.
* configure.ac: Don't require .size and .type in aarch64 assembly check.
--
Don't require .type and .size in configure; we can make
them optional via a preprocessor macro.
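
For reference, the ELF() wrapper in the new asm-common-aarch64.h
simply passes its arguments through on ELF targets and expands to
nothing elsewhere:

  #ifdef __ELF__
  # define ELF(...) __VA_ARGS__
  #else
  # define ELF(...) /*_*/
  #endif

so e.g. ELF(.type _gcry_chacha20_aarch64_blocks4,%function;) emits
the directive as-is on ELF and disappears on COFF (windows) and
Mach-O (darwin).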

This is mostly a mechanical change, wrapping the .type and .size
directives in an ELF() macro, with two actual manual changes (when
targeting windows):
- Don't load global symbols via a GOT (in chacha20; sketched below)
- Don't use the x18 register (in camellia); back up and restore x19
  in the prologue/epilogue and use that instead.
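
PE/COFF has no GOT, so on windows GET_DATA_POINTER resolves the
address with a pc-relative adrp/add pair instead of loading it from
a GOT slot. A sketch of the two forms (reg and sym are placeholders):

  /* ELF (PIC): load the address from the GOT */
  adrp    reg, :got:sym
  ldr     reg, [reg, #:got_lo12:sym]

  /* windows: materialize the address directly */
  adrp    reg, sym
  add     reg, reg, #:lo12:sym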

x18 is a platform-specific register; on linux, it's free to be used
by user code, while on windows and darwin it's reserved for platform
use. Always use x19 instead of x18, for consistency.
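
Since x19 (unlike x18) is callee-saved in the AAPCS64, it has to be
preserved across the function; the camellia entry points do this with
a stp/ldp pair, e.g.:

  stp x19, x30, [sp, #-16]!   /* prologue: save x19 (and x30) */
  ...
  ldp x19, x30, [sp], #16     /* epilogue: restore before ret */
  ret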

Signed-off-by: Martin Storsjö <martin at martin.st>
---
This isn't strictly necessary for building libgcrypt for windows
on aarch64; previously, configure concluded that the assembly
couldn't be built since the .type and .size directives don't work.
This just allows using more of the existing assembly routines.

This probably also has the effect that the same assembly gets
enabled when targeting darwin (iOS), but building with assembly
enabled doesn't work on darwin anyway (even prior to this change,
since darwin requires an extra leading underscore on all symbols,
while the mpi/aarch64 code gets enabled automatically).
---
 cipher/asm-common-aarch64.h          | 32 ++++++++++++++++++++
 cipher/camellia-aarch64.S            | 23 ++++++++------
 cipher/chacha20-aarch64.S            | 12 ++++++--
 cipher/cipher-gcm-armv8-aarch64-ce.S | 10 +++----
 cipher/rijndael-aarch64.S            | 10 +++----
 cipher/rijndael-armv8-aarch64-ce.S   | 58 ++++++++++++++++++------------------
 cipher/sha1-armv8-aarch64-ce.S       |  6 ++--
 cipher/sha256-armv8-aarch64-ce.S     |  6 ++--
 cipher/twofish-aarch64.S             | 10 +++----
 configure.ac                         |  4 ---
 10 files changed, 105 insertions(+), 66 deletions(-)
 create mode 100644 cipher/asm-common-aarch64.h

diff --git a/cipher/asm-common-aarch64.h b/cipher/asm-common-aarch64.h
new file mode 100644
index 0000000..814b7ad
--- /dev/null
+++ b/cipher/asm-common-aarch64.h
@@ -0,0 +1,32 @@
+/* asm-common-aarch64.h  -  Common macros for AArch64 assembly
+ *
+ * Copyright (C) 2018 Martin Storsjö <martin at martin.st>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GCRY_ASM_COMMON_AARCH64_H
+#define GCRY_ASM_COMMON_AARCH64_H
+
+#include <config.h>
+
+#ifdef __ELF__
+# define ELF(...) __VA_ARGS__
+#else
+# define ELF(...) /*_*/
+#endif
+
+#endif /* GCRY_ASM_COMMON_AARCH64_H */
diff --git a/cipher/camellia-aarch64.S b/cipher/camellia-aarch64.S
index 68d2a7d..c3cc463 100644
--- a/cipher/camellia-aarch64.S
+++ b/cipher/camellia-aarch64.S
@@ -19,7 +19,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -55,12 +55,12 @@
 #define RT0 w15
 #define RT1 w16
 #define RT2 w17
-#define RT3 w18
+#define RT3 w19
 
 #define xRT0 x15
 #define xRT1 x16
 #define xRT2 x17
-#define xRT3 x18
+#define xRT3 x19
 
 #ifdef __AARCH64EL__
   #define host_to_be(reg, rtmp) \
@@ -198,9 +198,10 @@
 	str_output_be(RDST, YL, YR, XL, XR, RT0, RT1);
 
 .globl _gcry_camellia_arm_encrypt_block
-.type   _gcry_camellia_arm_encrypt_block, at function;
+ELF(.type   _gcry_camellia_arm_encrypt_block, at function;)
 
 _gcry_camellia_arm_encrypt_block:
+	stp x19, x30, [sp, #-16]!
 	/* input:
 	 *	x0: keytable
 	 *	x1: dst
@@ -227,6 +228,7 @@ _gcry_camellia_arm_encrypt_block:
 
 	outunpack(24);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
 
@@ -236,14 +238,16 @@ _gcry_camellia_arm_encrypt_block:
 
 	outunpack(32);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
-.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;
+ELF(.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;)
 
 .globl _gcry_camellia_arm_decrypt_block
-.type   _gcry_camellia_arm_decrypt_block, at function;
+ELF(.type   _gcry_camellia_arm_decrypt_block, at function;)
 
 _gcry_camellia_arm_decrypt_block:
+	stp x19, x30, [sp, #-16]!
 	/* input:
 	 *	x0: keytable
 	 *	x1: dst
@@ -271,6 +275,7 @@ _gcry_camellia_arm_decrypt_block:
 
 	outunpack(0);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
 
@@ -281,11 +286,11 @@ _gcry_camellia_arm_decrypt_block:
 
 	b .Ldec_128;
 .ltorg
-.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;
+ELF(.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;)
 
 /* Encryption/Decryption tables */
 .globl _gcry_camellia_arm_tables
-.type  _gcry_camellia_arm_tables, at object;
+ELF(.type  _gcry_camellia_arm_tables, at object;)
 .balign 32
 _gcry_camellia_arm_tables:
 .Lcamellia_sp1110:
@@ -551,7 +556,7 @@ _gcry_camellia_arm_tables:
 .long 0xc7c7c700, 0x008f8f8f, 0xe300e3e3, 0xf4f400f4
 .long 0x80808000, 0x00010101, 0x40004040, 0xc7c700c7
 .long 0x9e9e9e00, 0x003d3d3d, 0x4f004f4f, 0x9e9e009e
-.size _gcry_camellia_arm_tables,.-_gcry_camellia_arm_tables;
+ELF(.size _gcry_camellia_arm_tables,.-_gcry_camellia_arm_tables;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__*/
diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index 5990a08..3844d4e 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -27,7 +27,7 @@
  * Public domain.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -38,9 +38,15 @@
 
 .text
 
+#ifdef _WIN32
+#define GET_DATA_POINTER(reg, name) \
+	adrp    reg, name ; \
+	add     reg, reg, #:lo12:name ;
+#else
 #define GET_DATA_POINTER(reg, name) \
 	adrp    reg, :got:name ; \
 	ldr     reg, [reg, #:got_lo12:name] ;
+#endif
 
 /* register macros */
 #define INPUT     x0
@@ -148,7 +154,7 @@ chacha20_data:
 
 .align 3
 .globl _gcry_chacha20_aarch64_blocks4
-.type _gcry_chacha20_aarch64_blocks4,%function;
+ELF(.type _gcry_chacha20_aarch64_blocks4,%function;)
 
 _gcry_chacha20_aarch64_blocks4:
 	/* input:
@@ -303,6 +309,6 @@ _gcry_chacha20_aarch64_blocks4:
 
 	eor x0, x0, x0
 	ret
-.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;
+ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;)
 
 #endif
diff --git a/cipher/cipher-gcm-armv8-aarch64-ce.S b/cipher/cipher-gcm-armv8-aarch64-ce.S
index 0cfaf1c..b6c4f59 100644
--- a/cipher/cipher-gcm-armv8-aarch64-ce.S
+++ b/cipher/cipher-gcm-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -174,7 +174,7 @@ gcry_gcm_reduction_constant:
  */
 .align 3
 .globl _gcry_ghash_armv8_ce_pmull
-.type  _gcry_ghash_armv8_ce_pmull,%function;
+ELF(.type  _gcry_ghash_armv8_ce_pmull,%function;)
 _gcry_ghash_armv8_ce_pmull:
   /* input:
    *    x0: gcm_key
@@ -360,7 +360,7 @@ _gcry_ghash_armv8_ce_pmull:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;
+ELF(.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;)
 
 
 /*
@@ -368,7 +368,7 @@ _gcry_ghash_armv8_ce_pmull:
  */
 .align 3
 .globl _gcry_ghash_setup_armv8_ce_pmull
-.type  _gcry_ghash_setup_armv8_ce_pmull,%function;
+ELF(.type  _gcry_ghash_setup_armv8_ce_pmull,%function;)
 _gcry_ghash_setup_armv8_ce_pmull:
   /* input:
    *	x0: gcm_key
@@ -408,6 +408,6 @@ _gcry_ghash_setup_armv8_ce_pmull:
   st1 {rh5.16b-rh6.16b}, [x1]
 
   ret
-.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;
+ELF(.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;)
 
 #endif
diff --git a/cipher/rijndael-aarch64.S b/cipher/rijndael-aarch64.S
index e533bbe..aad7487 100644
--- a/cipher/rijndael-aarch64.S
+++ b/cipher/rijndael-aarch64.S
@@ -18,7 +18,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -206,7 +206,7 @@
 	addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy);
 
 .globl _gcry_aes_arm_encrypt_block
-.type   _gcry_aes_arm_encrypt_block,%function;
+ELF(.type   _gcry_aes_arm_encrypt_block,%function;)
 
 _gcry_aes_arm_encrypt_block:
 	/* input:
@@ -285,7 +285,7 @@ _gcry_aes_arm_encrypt_block:
 	lastencround(11, RNA, RNB, RNC, RND, RA, RB, RC, RD);
 
 	b .Lenc_done;
-.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;
+ELF(.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;)
 
 #define addroundkey_dec(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \
 	ldr rna, [CTX, #(((round) * 16) + 0 * 4)]; \
@@ -429,7 +429,7 @@ _gcry_aes_arm_encrypt_block:
 	addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy);
 
 .globl _gcry_aes_arm_decrypt_block
-.type   _gcry_aes_arm_decrypt_block,%function;
+ELF(.type   _gcry_aes_arm_decrypt_block,%function;)
 
 _gcry_aes_arm_decrypt_block:
 	/* input:
@@ -504,7 +504,7 @@ _gcry_aes_arm_decrypt_block:
 	decround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key);
 
 	b .Ldec_tail;
-.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;
+ELF(.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__ */
diff --git a/cipher/rijndael-armv8-aarch64-ce.S b/cipher/rijndael-armv8-aarch64-ce.S
index 40097a7..5859557 100644
--- a/cipher/rijndael-armv8-aarch64-ce.S
+++ b/cipher/rijndael-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -239,7 +239,7 @@
  */
 .align 3
 .globl _gcry_aes_enc_armv8_ce
-.type  _gcry_aes_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_enc_armv8_ce,%function;)
 _gcry_aes_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -291,7 +291,7 @@ _gcry_aes_enc_armv8_ce:
   CLEAR_REG(vk13)
   CLEAR_REG(vk14)
   b .Lenc1_tail
-.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;
+ELF(.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;)
 
 
 /*
@@ -301,7 +301,7 @@ _gcry_aes_enc_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_dec_armv8_ce
-.type  _gcry_aes_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_dec_armv8_ce,%function;)
 _gcry_aes_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -353,7 +353,7 @@ _gcry_aes_dec_armv8_ce:
   CLEAR_REG(vk13)
   CLEAR_REG(vk14)
   b .Ldec1_tail
-.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;
+ELF(.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;)
 
 
 /*
@@ -366,7 +366,7 @@ _gcry_aes_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cbc_enc_armv8_ce
-.type  _gcry_aes_cbc_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_cbc_enc_armv8_ce,%function;)
 _gcry_aes_cbc_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -419,7 +419,7 @@ _gcry_aes_cbc_enc_armv8_ce:
 
 .Lcbc_enc_skip:
   ret
-.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;
+ELF(.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;)
 
 /*
  * void _gcry_aes_cbc_dec_armv8_ce (const void *keysched,
@@ -430,7 +430,7 @@ _gcry_aes_cbc_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cbc_dec_armv8_ce
-.type  _gcry_aes_cbc_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_cbc_dec_armv8_ce,%function;)
 _gcry_aes_cbc_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -515,7 +515,7 @@ _gcry_aes_cbc_dec_armv8_ce:
 
 .Lcbc_dec_skip:
   ret
-.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;
+ELF(.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;)
 
 
 /*
@@ -527,7 +527,7 @@ _gcry_aes_cbc_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ctr_enc_armv8_ce
-.type  _gcry_aes_ctr_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_ctr_enc_armv8_ce,%function;)
 _gcry_aes_ctr_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -669,7 +669,7 @@ _gcry_aes_ctr_enc_armv8_ce:
 .Lctr_enc_skip:
   ret
 
-.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;
+ELF(.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;)
 
 
 /*
@@ -681,7 +681,7 @@ _gcry_aes_ctr_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cfb_enc_armv8_ce
-.type  _gcry_aes_cfb_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_cfb_enc_armv8_ce,%function;)
 _gcry_aes_cfb_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -732,7 +732,7 @@ _gcry_aes_cfb_enc_armv8_ce:
 
 .Lcfb_enc_skip:
   ret
-.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;
+ELF(.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;)
 
 
 /*
@@ -744,7 +744,7 @@ _gcry_aes_cfb_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cfb_dec_armv8_ce
-.type  _gcry_aes_cfb_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_cfb_dec_armv8_ce,%function;)
 _gcry_aes_cfb_dec_armv8_ce:
   /* input:
    *    r0: keysched
@@ -829,7 +829,7 @@ _gcry_aes_cfb_dec_armv8_ce:
 
 .Lcfb_dec_skip:
   ret
-.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;
+ELF(.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;)
 
 
 /*
@@ -846,7 +846,7 @@ _gcry_aes_cfb_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_enc_armv8_ce
-.type  _gcry_aes_ocb_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_enc_armv8_ce,%function;)
 _gcry_aes_ocb_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -979,7 +979,7 @@ _gcry_aes_ocb_enc_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;
+ELF(.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;)
 
 
 /*
@@ -996,7 +996,7 @@ _gcry_aes_ocb_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_dec_armv8_ce
-.type  _gcry_aes_ocb_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_dec_armv8_ce,%function;)
 _gcry_aes_ocb_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -1129,7 +1129,7 @@ _gcry_aes_ocb_dec_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;
+ELF(.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;)
 
 
 /*
@@ -1145,7 +1145,7 @@ _gcry_aes_ocb_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_auth_armv8_ce
-.type  _gcry_aes_ocb_auth_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_auth_armv8_ce,%function;)
 _gcry_aes_ocb_auth_armv8_ce:
   /* input:
    *    x0: keysched
@@ -1273,7 +1273,7 @@ _gcry_aes_ocb_auth_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;
+ELF(.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;)
 
 
 /*
@@ -1285,7 +1285,7 @@ _gcry_aes_ocb_auth_armv8_ce:
 
 .align 3
 .globl _gcry_aes_xts_enc_armv8_ce
-.type  _gcry_aes_xts_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_xts_enc_armv8_ce,%function;)
 _gcry_aes_xts_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -1410,7 +1410,7 @@ _gcry_aes_xts_enc_armv8_ce:
 .Lxts_enc_skip:
   ret
 
-.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;
+ELF(.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;)
 
 
 /*
@@ -1422,7 +1422,7 @@ _gcry_aes_xts_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_xts_dec_armv8_ce
-.type  _gcry_aes_xts_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_xts_dec_armv8_ce,%function;)
 _gcry_aes_xts_dec_armv8_ce:
   /* input:
    *    r0: keysched
@@ -1547,7 +1547,7 @@ _gcry_aes_xts_dec_armv8_ce:
 .Lxts_dec_skip:
   ret
 
-.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;
+ELF(.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;)
 
 
 /*
@@ -1555,7 +1555,7 @@ _gcry_aes_xts_dec_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_sbox4_armv8_ce
-.type  _gcry_aes_sbox4_armv8_ce,%function;
+ELF(.type  _gcry_aes_sbox4_armv8_ce,%function;)
 _gcry_aes_sbox4_armv8_ce:
   /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
    * Cryptology — CT-RSA 2015" for details.
@@ -1568,7 +1568,7 @@ _gcry_aes_sbox4_armv8_ce:
   mov w0, v0.S[0]
   CLEAR_REG(v0)
   ret
-.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;
+ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;)
 
 
 /*
@@ -1576,13 +1576,13 @@ _gcry_aes_sbox4_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_invmixcol_armv8_ce
-.type  _gcry_aes_invmixcol_armv8_ce,%function;
+ELF(.type  _gcry_aes_invmixcol_armv8_ce,%function;)
 _gcry_aes_invmixcol_armv8_ce:
   ld1 {v0.16b}, [x1]
   aesimc v0.16b, v0.16b
   st1 {v0.16b}, [x0]
   CLEAR_REG(v0)
   ret
-.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;
+ELF(.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;)
 
 #endif
diff --git a/cipher/sha1-armv8-aarch64-ce.S b/cipher/sha1-armv8-aarch64-ce.S
index ec1810d..aeb67a1 100644
--- a/cipher/sha1-armv8-aarch64-ce.S
+++ b/cipher/sha1-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -103,7 +103,7 @@ gcry_sha1_aarch64_ce_K_VEC:
  */
 .align 3
 .globl _gcry_sha1_transform_armv8_ce
-.type  _gcry_sha1_transform_armv8_ce,%function;
+ELF(.type  _gcry_sha1_transform_armv8_ce,%function;)
 _gcry_sha1_transform_armv8_ce:
   /* input:
    *	x0: ctx, CTX
@@ -199,6 +199,6 @@ _gcry_sha1_transform_armv8_ce:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;
+ELF(.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;)
 
 #endif
diff --git a/cipher/sha256-armv8-aarch64-ce.S b/cipher/sha256-armv8-aarch64-ce.S
index a4575da..6b3ad32 100644
--- a/cipher/sha256-armv8-aarch64-ce.S
+++ b/cipher/sha256-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -113,7 +113,7 @@ gcry_sha256_aarch64_ce_K:
  */
 .align 3
 .globl _gcry_sha256_transform_armv8_ce
-.type  _gcry_sha256_transform_armv8_ce,%function;
+ELF(.type  _gcry_sha256_transform_armv8_ce,%function;)
 _gcry_sha256_transform_armv8_ce:
   /* input:
    *	r0: ctx, CTX
@@ -213,6 +213,6 @@ _gcry_sha256_transform_armv8_ce:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;
+ELF(.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;)
 
 #endif
diff --git a/cipher/twofish-aarch64.S b/cipher/twofish-aarch64.S
index 99c4675..adee412 100644
--- a/cipher/twofish-aarch64.S
+++ b/cipher/twofish-aarch64.S
@@ -18,7 +18,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -217,7 +217,7 @@
 	ror1(RD);
 
 .globl _gcry_twofish_arm_encrypt_block
-.type   _gcry_twofish_arm_encrypt_block,%function;
+ELF(.type   _gcry_twofish_arm_encrypt_block,%function;)
 
 _gcry_twofish_arm_encrypt_block:
 	/* input:
@@ -263,10 +263,10 @@ _gcry_twofish_arm_encrypt_block:
 
 	ret;
 .ltorg
-.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;
+ELF(.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;)
 
 .globl _gcry_twofish_arm_decrypt_block
-.type   _gcry_twofish_arm_decrypt_block,%function;
+ELF(.type   _gcry_twofish_arm_decrypt_block,%function;)
 
 _gcry_twofish_arm_decrypt_block:
 	/* input:
@@ -311,7 +311,7 @@ _gcry_twofish_arm_decrypt_block:
 	str_output_le(RDST, RA, RB, RC, RD, RT0, RT1);
 
 	ret;
-.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;
+ELF(.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__*/
diff --git a/configure.ac b/configure.ac
index b5d7211..330485f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1119,10 +1119,6 @@ AC_CACHE_CHECK([whether GCC assembler is compatible for ARMv8/Aarch64 assembly i
                 "eor x0, x0, x30, ror #12;\n\t"
                 "add x0, x0, x30, asr #12;\n\t"
                 "eor v0.16b, v0.16b, v31.16b;\n\t"
-
-                /* Test if '.type' and '.size' are supported.  */
-                ".size asmfunc,.-asmfunc;\n\t"
-                ".type asmfunc, at function;\n\t"
             );]])],
           [gcry_cv_gcc_aarch64_platform_as_ok=yes])])
 if test "$gcry_cv_gcc_aarch64_platform_as_ok" = "yes" ; then
-- 
2.7.4



