[PATCH 2/4] Add CFI unwind assembly directives for 64-bit ARM assembly
Jussi Kivilinna
jussi.kivilinna at iki.fi
Fri Apr 26 18:33:36 CEST 2019
* cipher/asm-common-aarch64.h (CFI_STARTPROC, CFI_ENDPROC)
(CFI_REMEMBER_STATE, CFI_RESTORE_STATE, CFI_ADJUST_CFA_OFFSET)
(CFI_REL_OFFSET, CFI_DEF_CFA_REGISTER, CFI_REGISTER, CFI_RESTORE)
(DW_REGNO_SP, DW_SLEB128_7BIT, DW_SLEB128_28BIT, CFI_CFA_ON_STACK)
(CFI_REG_ON_STACK): New.
* cipher/camellia-aarch64.S: Add CFI directives.
* cipher/chacha20-aarch64.S: Add CFI directives.
* cipher/cipher-gcm-armv8-aarch64-ce.S: Add CFI directives.
* cipher/crc-armv8-aarch64-ce.S: Add CFI directives.
* cipher/rijndael-aarch64.S: Add CFI directives.
* cipher/rijndael-armv8-aarch64-ce.S: Add CFI directives.
* cipher/sha1-armv8-aarch64-ce.S: Add CFI directives.
* cipher/sha256-armv8-aarch64-ce.S: Add CFI directives.
* cipher/twofish-aarch64.S: Add CFI directives.
* mpi/aarch64/mpih-add1.S: Add CFI directives.
* mpi/aarch64/mpih-mul1.S: Add CFI directives.
* mpi/aarch64/mpih-mul2.S: Add CFI directives.
* mpi/aarch64/mpih-mul3.S: Add CFI directives.
* mpi/aarch64/mpih-sub1.S: Add CFI directives.
* mpi/asm-common-aarch64.h: Include "../cipher/asm-common-aarch64.h".
(ELF): Remove.
--
This commit adds CFI directives that emit DWARF unwinding information,
so that debuggers can generate backtraces through code executing in the
64-bit ARM assembly files.
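
For illustration, the macros are meant to bracket the usual register
save/restore sequences; below is a minimal sketch modeled on the
camellia-aarch64.S hunks in this patch ("example_func" is a placeholder
name, not a symbol this patch touches):

example_func:	/* placeholder, for illustration only */
	CFI_STARTPROC()
	stp x19, x30, [sp, #-16]!
	CFI_ADJUST_CFA_OFFSET(16)	/* SP dropped by 16, CFA is now SP+16 */
	CFI_REG_ON_STACK(19, 0)		/* x19 saved at [sp, #0] */
	CFI_REG_ON_STACK(30, 8)		/* x30 saved at [sp, #8] */
	/* ... function body ... */
	ldp x19, x30, [sp], #16
	CFI_ADJUST_CFA_OFFSET(-16)	/* frame released, CFA back to SP */
	CFI_RESTORE(x19)
	CFI_RESTORE(x30)
	ret
	CFI_ENDPROC()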
Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
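For reviewers, the two .cfi_escape helpers decode as follows (my
reading of the DWARF encoding; the byte values follow from the macro
definitions in asm-common-aarch64.h below):

 - CFI_REG_ON_STACK(regno, offs) emits DW_CFA_expression, telling the
   unwinder that register 'regno' was saved at address SP + offs
   (DW_OP_breg31; register 31 is SP in AArch64 DWARF numbering).

 - CFI_CFA_ON_STACK(offs, depth) emits DW_CFA_def_cfa_expression,
   defining the CFA as *(SP + offs) + depth + 8, for code that has
   spilled the old stack pointer to the stack.

 - The DW_SLEB128_*BIT macros encode integers as fixed-width LEB128 so
   that expression lengths stay constant; for example,
   DW_SLEB128_28BIT(16) expands to the four bytes 0x90, 0x80, 0x80,
   0x00.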
diff --git a/cipher/asm-common-aarch64.h b/cipher/asm-common-aarch64.h
index 814b7ad16..502c35aeb 100644
--- a/cipher/asm-common-aarch64.h
+++ b/cipher/asm-common-aarch64.h
@@ -29,4 +29,62 @@
# define ELF(...) /*_*/
#endif
+#ifdef HAVE_GCC_ASM_CFI_DIRECTIVES
+/* CFI directives to emit DWARF stack unwinding information. */
+# define CFI_STARTPROC() .cfi_startproc
+# define CFI_ENDPROC() .cfi_endproc
+# define CFI_REMEMBER_STATE() .cfi_remember_state
+# define CFI_RESTORE_STATE() .cfi_restore_state
+# define CFI_ADJUST_CFA_OFFSET(off) .cfi_adjust_cfa_offset off
+# define CFI_REL_OFFSET(reg,off) .cfi_rel_offset reg, off
+# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+# define CFI_REGISTER(ro,rn) .cfi_register ro, rn
+# define CFI_RESTORE(reg) .cfi_restore reg
+
+/* CFA expressions are used to locate the CFA and saved registers at
+ * SP-relative offsets. */
+# define DW_REGNO_SP 31
+
+/* Fixed length encoding used for integers for now. */
+# define DW_SLEB128_7BIT(value) \
+ 0x00|((value) & 0x7f)
+# define DW_SLEB128_28BIT(value) \
+ 0x80|((value)&0x7f), \
+ 0x80|(((value)>>7)&0x7f), \
+ 0x80|(((value)>>14)&0x7f), \
+ 0x00|(((value)>>21)&0x7f)
+
+# define CFI_CFA_ON_STACK(rsp_offs,cfa_depth) \
+ .cfi_escape \
+ 0x0f, /* DW_CFA_def_cfa_expression */ \
+ DW_SLEB128_7BIT(11), /* length */ \
+ 0x8f, /* DW_OP_breg31, sp + constant */ \
+ DW_SLEB128_28BIT(rsp_offs), \
+ 0x06, /* DW_OP_deref */ \
+ 0x23, /* DW_OP_plus_uconst */ \
+ DW_SLEB128_28BIT((cfa_depth)+8)
+
+# define CFI_REG_ON_STACK(regno,rsp_offs) \
+ .cfi_escape \
+ 0x10, /* DW_CFA_expression */ \
+ DW_SLEB128_7BIT(regno), \
+ DW_SLEB128_7BIT(5), /* length */ \
+ 0x8f, /* DW_OP_breg31, sp + constant */ \
+ DW_SLEB128_28BIT(rsp_offs)
+
+#else
+# define CFI_STARTPROC()
+# define CFI_ENDPROC()
+# define CFI_REMEMBER_STATE()
+# define CFI_RESTORE_STATE()
+# define CFI_ADJUST_CFA_OFFSET(off)
+# define CFI_REL_OFFSET(reg,off)
+# define CFI_DEF_CFA_REGISTER(reg)
+# define CFI_REGISTER(ro,rn)
+# define CFI_RESTORE(reg)
+
+# define CFI_CFA_ON_STACK(rsp_offs,cfa_depth)
+# define CFI_REG_ON_STACK(reg,rsp_offs)
+#endif
+
#endif /* GCRY_ASM_COMMON_AARCH64_H */
diff --git a/cipher/camellia-aarch64.S b/cipher/camellia-aarch64.S
index 5c6ab020a..f49808621 100644
--- a/cipher/camellia-aarch64.S
+++ b/cipher/camellia-aarch64.S
@@ -201,7 +201,12 @@
ELF(.type _gcry_camellia_arm_encrypt_block,@function;)
_gcry_camellia_arm_encrypt_block:
+ CFI_STARTPROC()
stp x19, x30, [sp, #-16]!
+ CFI_ADJUST_CFA_OFFSET(16)
+ CFI_REG_ON_STACK(19, 0)
+ CFI_REG_ON_STACK(30, 8)
+
/* input:
* x0: keytable
* x1: dst
@@ -228,8 +233,13 @@ _gcry_camellia_arm_encrypt_block:
outunpack(24);
+ CFI_REMEMBER_STATE()
ldp x19, x30, [sp], #16
+ CFI_ADJUST_CFA_OFFSET(-16)
+ CFI_RESTORE(x19)
+ CFI_RESTORE(x30)
ret;
+ CFI_RESTORE_STATE()
.ltorg
.Lenc_256:
@@ -239,7 +249,11 @@ _gcry_camellia_arm_encrypt_block:
outunpack(32);
ldp x19, x30, [sp], #16
+ CFI_ADJUST_CFA_OFFSET(-16)
+ CFI_RESTORE(x19)
+ CFI_RESTORE(x30)
ret;
+ CFI_ENDPROC()
.ltorg
ELF(.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;)
@@ -247,7 +261,12 @@ ELF(.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;)
ELF(.type _gcry_camellia_arm_decrypt_block,@function;)
_gcry_camellia_arm_decrypt_block:
+ CFI_STARTPROC()
stp x19, x30, [sp, #-16]!
+ CFI_ADJUST_CFA_OFFSET(16)
+ CFI_REG_ON_STACK(19, 0)
+ CFI_REG_ON_STACK(30, 8)
+
/* input:
* x0: keytable
* x1: dst
@@ -275,8 +294,13 @@ _gcry_camellia_arm_decrypt_block:
outunpack(0);
+ CFI_REMEMBER_STATE()
ldp x19, x30, [sp], #16
+ CFI_ADJUST_CFA_OFFSET(-16)
+ CFI_RESTORE(x19)
+ CFI_RESTORE(x30)
ret;
+ CFI_RESTORE_STATE()
.ltorg
.Ldec_256:
@@ -285,6 +309,7 @@ _gcry_camellia_arm_decrypt_block:
dec_fls(24);
b .Ldec_128;
+ CFI_ENDPROC()
.ltorg
ELF(.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;)
diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index 3844d4e10..adb9b1f29 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -163,6 +163,7 @@ _gcry_chacha20_aarch64_blocks4:
* x2: src
* x3: nblks (multiple of 4)
*/
+ CFI_STARTPROC()
GET_DATA_POINTER(CTR, .Linc_counter);
add INPUT_CTR, INPUT, #(12*4);
@@ -309,6 +310,7 @@ _gcry_chacha20_aarch64_blocks4:
eor x0, x0, x0
ret
+ CFI_ENDPROC()
ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;)
#endif
diff --git a/cipher/cipher-gcm-armv8-aarch64-ce.S b/cipher/cipher-gcm-armv8-aarch64-ce.S
index b6c4f59d3..7c6be94ed 100644
--- a/cipher/cipher-gcm-armv8-aarch64-ce.S
+++ b/cipher/cipher-gcm-armv8-aarch64-ce.S
@@ -157,15 +157,23 @@ gcry_gcm_reduction_constant:
#define VPUSH_ABI \
stp d8, d9, [sp, #-16]!; \
+ CFI_ADJUST_CFA_OFFSET(16); \
stp d10, d11, [sp, #-16]!; \
+ CFI_ADJUST_CFA_OFFSET(16); \
stp d12, d13, [sp, #-16]!; \
- stp d14, d15, [sp, #-16]!;
+ CFI_ADJUST_CFA_OFFSET(16); \
+ stp d14, d15, [sp, #-16]!; \
+ CFI_ADJUST_CFA_OFFSET(16);
#define VPOP_ABI \
ldp d14, d15, [sp], #16; \
+ CFI_ADJUST_CFA_OFFSET(-16); \
ldp d12, d13, [sp], #16; \
+ CFI_ADJUST_CFA_OFFSET(-16); \
ldp d10, d11, [sp], #16; \
- ldp d8, d9, [sp], #16;
+ CFI_ADJUST_CFA_OFFSET(-16); \
+ ldp d8, d9, [sp], #16; \
+ CFI_ADJUST_CFA_OFFSET(-16);
/*
* unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result,
@@ -183,6 +191,8 @@ _gcry_ghash_armv8_ce_pmull:
* x3: nblocks
* x4: gcm_table
*/
+ CFI_STARTPROC();
+
cbz x3, .Ldo_nothing;
GET_DATA_POINTER(x5, .Lrconst)
@@ -360,6 +370,7 @@ _gcry_ghash_armv8_ce_pmull:
.Ldo_nothing:
mov x0, #0
ret
+ CFI_ENDPROC()
ELF(.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;)
@@ -374,6 +385,7 @@ _gcry_ghash_setup_armv8_ce_pmull:
* x0: gcm_key
* x1: gcm_table
*/
+ CFI_STARTPROC()
GET_DATA_POINTER(x2, .Lrconst)
@@ -408,6 +420,7 @@ _gcry_ghash_setup_armv8_ce_pmull:
st1 {rh5.16b-rh6.16b}, [x1]
ret
+ CFI_ENDPROC()
ELF(.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;)
#endif
diff --git a/cipher/crc-armv8-aarch64-ce.S b/cipher/crc-armv8-aarch64-ce.S
index 497d00551..f269b74a3 100644
--- a/cipher/crc-armv8-aarch64-ce.S
+++ b/cipher/crc-armv8-aarch64-ce.S
@@ -72,6 +72,7 @@ _gcry_crc32r_armv8_ce_bulk:
* x2: inlen
* x3: consts
*/
+ CFI_STARTPROC()
GET_DATA_POINTER(x7, .Lcrc32_constants)
add x9, x3, #consts_k(5 - 1)
@@ -230,6 +231,7 @@ _gcry_crc32r_armv8_ce_bulk:
st1 {v0.s}[2], [x0]
ret
+ CFI_ENDPROC()
ELF(.size _gcry_crc32r_armv8_ce_bulk,.-_gcry_crc32r_armv8_ce_bulk;)
/*
@@ -245,6 +247,7 @@ _gcry_crc32r_armv8_ce_reduction_4:
* w1: crc
* x2: crc32 constants
*/
+ CFI_STARTPROC()
eor v0.16b, v0.16b, v0.16b
add x2, x2, #consts_my_p(0)
@@ -261,6 +264,7 @@ _gcry_crc32r_armv8_ce_reduction_4:
mov w0, v0.s[1]
ret
+ CFI_ENDPROC()
ELF(.size _gcry_crc32r_armv8_ce_reduction_4,.-_gcry_crc32r_armv8_ce_reduction_4;)
/*
@@ -277,6 +281,7 @@ _gcry_crc32_armv8_ce_bulk:
* x2: inlen
* x3: consts
*/
+ CFI_STARTPROC()
GET_DATA_POINTER(x7, .Lcrc32_constants)
add x4, x7, #.Lcrc32_bswap_shuf - .Lcrc32_constants
@@ -456,6 +461,7 @@ _gcry_crc32_armv8_ce_bulk:
st1 {v0.s}[0], [x0]
ret
+ CFI_ENDPROC()
ELF(.size _gcry_crc32_armv8_ce_bulk,.-_gcry_crc32_armv8_ce_bulk;)
/*
@@ -471,6 +477,7 @@ _gcry_crc32_armv8_ce_reduction_4:
* w1: crc
* x2: crc32 constants
*/
+ CFI_STARTPROC()
eor v0.16b, v0.16b, v0.16b
add x2, x2, #consts_my_p(0)
@@ -487,6 +494,7 @@ _gcry_crc32_armv8_ce_reduction_4:
mov w0, v0.s[0]
ret
+ CFI_ENDPROC()
ELF(.size _gcry_crc32_armv8_ce_reduction_4,.-_gcry_crc32_armv8_ce_reduction_4;)
#endif
diff --git a/cipher/rijndael-aarch64.S b/cipher/rijndael-aarch64.S
index aad748753..e77dd4e0b 100644
--- a/cipher/rijndael-aarch64.S
+++ b/cipher/rijndael-aarch64.S
@@ -216,6 +216,7 @@ _gcry_aes_arm_encrypt_block:
* %w3: number of rounds.. 10, 12 or 14
* %x4: encryption table
*/
+ CFI_STARTPROC();
/* read input block */
@@ -285,6 +286,7 @@ _gcry_aes_arm_encrypt_block:
lastencround(11, RNA, RNB, RNC, RND, RA, RB, RC, RD);
b .Lenc_done;
+ CFI_ENDPROC();
ELF(.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;)
#define addroundkey_dec(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \
@@ -439,6 +441,7 @@ _gcry_aes_arm_decrypt_block:
* %w3: number of rounds.. 10, 12 or 14
* %x4: decryption table
*/
+ CFI_STARTPROC();
/* read input block */
@@ -504,6 +507,7 @@ _gcry_aes_arm_decrypt_block:
decround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key);
b .Ldec_tail;
+ CFI_ENDPROC();
ELF(.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;)
#endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
diff --git a/cipher/rijndael-armv8-aarch64-ce.S b/cipher/rijndael-armv8-aarch64-ce.S
index f0012c20a..71b45b856 100644
--- a/cipher/rijndael-armv8-aarch64-ce.S
+++ b/cipher/rijndael-armv8-aarch64-ce.S
@@ -247,6 +247,7 @@ _gcry_aes_enc_armv8_ce:
* x2: src
* w3: nrounds
*/
+ CFI_STARTPROC();
aes_preload_keys(x0, w3);
@@ -291,6 +292,7 @@ _gcry_aes_enc_armv8_ce:
CLEAR_REG(vk13)
CLEAR_REG(vk14)
b .Lenc1_tail
+ CFI_ENDPROC();
ELF(.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;)
@@ -309,6 +311,7 @@ _gcry_aes_dec_armv8_ce:
* x2: src
* w3: nrounds
*/
+ CFI_STARTPROC();
aes_preload_keys(x0, w3);
@@ -353,6 +356,7 @@ _gcry_aes_dec_armv8_ce:
CLEAR_REG(vk13)
CLEAR_REG(vk14)
b .Ldec1_tail
+ CFI_ENDPROC();
ELF(.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;)
@@ -377,6 +381,7 @@ _gcry_aes_cbc_enc_armv8_ce:
* w5: cbc_mac
* w6: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lcbc_enc_skip
@@ -419,6 +424,7 @@ _gcry_aes_cbc_enc_armv8_ce:
.Lcbc_enc_skip:
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;)
/*
@@ -440,6 +446,7 @@ _gcry_aes_cbc_dec_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lcbc_dec_skip
@@ -515,6 +522,7 @@ _gcry_aes_cbc_dec_armv8_ce:
.Lcbc_dec_skip:
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;)
@@ -537,6 +545,7 @@ _gcry_aes_ctr_enc_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lctr_enc_skip
@@ -668,7 +677,7 @@ _gcry_aes_ctr_enc_armv8_ce:
.Lctr_enc_skip:
ret
-
+ CFI_ENDPROC();
ELF(.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;)
@@ -691,6 +700,7 @@ _gcry_aes_cfb_enc_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lcfb_enc_skip
@@ -732,6 +742,7 @@ _gcry_aes_cfb_enc_armv8_ce:
.Lcfb_enc_skip:
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;)
@@ -754,6 +765,7 @@ _gcry_aes_cfb_dec_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lcfb_dec_skip
@@ -829,6 +841,7 @@ _gcry_aes_cfb_dec_armv8_ce:
.Lcfb_dec_skip:
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;)
@@ -859,6 +872,7 @@ _gcry_aes_ocb_enc_armv8_ce:
* w7: nrounds
* %st+0: blkn => w12
*/
+ CFI_STARTPROC();
ldr w12, [sp]
ld1 {v0.16b}, [x3] /* load offset */
@@ -979,6 +993,7 @@ _gcry_aes_ocb_enc_armv8_ce:
CLEAR_REG(v16)
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;)
@@ -1009,6 +1024,7 @@ _gcry_aes_ocb_dec_armv8_ce:
* w7: nrounds
* %st+0: blkn => w12
*/
+ CFI_STARTPROC();
ldr w12, [sp]
ld1 {v0.16b}, [x3] /* load offset */
@@ -1129,6 +1145,7 @@ _gcry_aes_ocb_dec_armv8_ce:
CLEAR_REG(v16)
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;)
@@ -1157,6 +1174,8 @@ _gcry_aes_ocb_auth_armv8_ce:
* w6: nrounds => w7
* w7: blkn => w12
*/
+ CFI_STARTPROC();
+
mov w12, w7
mov w7, w6
mov x6, x5
@@ -1273,6 +1292,7 @@ _gcry_aes_ocb_auth_armv8_ce:
CLEAR_REG(v16)
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;)
@@ -1297,6 +1317,7 @@ _gcry_aes_xts_enc_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lxts_enc_skip
@@ -1411,7 +1432,7 @@ _gcry_aes_xts_enc_armv8_ce:
.Lxts_enc_skip:
ret
-
+ CFI_ENDPROC();
ELF(.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;)
@@ -1436,6 +1457,7 @@ _gcry_aes_xts_dec_armv8_ce:
* x4: nblocks
* w5: nrounds
*/
+ CFI_STARTPROC();
cbz x4, .Lxts_dec_skip
@@ -1550,7 +1572,7 @@ _gcry_aes_xts_dec_armv8_ce:
.Lxts_dec_skip:
ret
-
+ CFI_ENDPROC();
ELF(.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;)
@@ -1564,6 +1586,7 @@ _gcry_aes_sbox4_armv8_ce:
/* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
* Cryptology — CT-RSA 2015" for details.
*/
+ CFI_STARTPROC();
movi v0.16b, #0x52
movi v1.16b, #0
mov v0.S[0], w0
@@ -1572,6 +1595,7 @@ _gcry_aes_sbox4_armv8_ce:
mov w0, v0.S[0]
CLEAR_REG(v0)
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;)
@@ -1582,11 +1606,13 @@ ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;)
.globl _gcry_aes_invmixcol_armv8_ce
ELF(.type _gcry_aes_invmixcol_armv8_ce,%function;)
_gcry_aes_invmixcol_armv8_ce:
+ CFI_STARTPROC();
ld1 {v0.16b}, [x1]
aesimc v0.16b, v0.16b
st1 {v0.16b}, [x0]
CLEAR_REG(v0)
ret
+ CFI_ENDPROC();
ELF(.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;)
#endif
diff --git a/cipher/sha1-armv8-aarch64-ce.S b/cipher/sha1-armv8-aarch64-ce.S
index aeb67a128..7dc26c0f1 100644
--- a/cipher/sha1-armv8-aarch64-ce.S
+++ b/cipher/sha1-armv8-aarch64-ce.S
@@ -110,6 +110,7 @@ _gcry_sha1_transform_armv8_ce:
* x1: data (64*nblks bytes)
* x2: nblks
*/
+ CFI_STARTPROC();
cbz x2, .Ldo_nothing;
@@ -199,6 +200,7 @@ _gcry_sha1_transform_armv8_ce:
.Ldo_nothing:
mov x0, #0
ret
+ CFI_ENDPROC();
ELF(.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;)
#endif
diff --git a/cipher/sha256-armv8-aarch64-ce.S b/cipher/sha256-armv8-aarch64-ce.S
index 6b3ad32d8..706e0dfd9 100644
--- a/cipher/sha256-armv8-aarch64-ce.S
+++ b/cipher/sha256-armv8-aarch64-ce.S
@@ -120,6 +120,7 @@ _gcry_sha256_transform_armv8_ce:
* r1: data (64*nblks bytes)
* r2: nblks
*/
+ CFI_STARTPROC();
cbz x2, .Ldo_nothing;
@@ -213,6 +214,7 @@ _gcry_sha256_transform_armv8_ce:
.Ldo_nothing:
mov x0, #0
ret
+ CFI_ENDPROC();
ELF(.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;)
#endif
diff --git a/cipher/twofish-aarch64.S b/cipher/twofish-aarch64.S
index adee412d7..9f35b5cde 100644
--- a/cipher/twofish-aarch64.S
+++ b/cipher/twofish-aarch64.S
@@ -225,6 +225,7 @@ _gcry_twofish_arm_encrypt_block:
* x1: dst
* x2: src
*/
+ CFI_STARTPROC();
add CTXw, CTX, #(w);
@@ -262,6 +263,7 @@ _gcry_twofish_arm_encrypt_block:
str_output_le(RDST, RC, RD, RA, RB, RT0, RT1);
ret;
+ CFI_ENDPROC();
.ltorg
ELF(.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;)
@@ -274,6 +276,7 @@ _gcry_twofish_arm_decrypt_block:
* %r1: dst
* %r2: src
*/
+ CFI_STARTPROC();
add CTXw, CTX, #(w);
@@ -311,6 +314,7 @@ _gcry_twofish_arm_decrypt_block:
str_output_le(RDST, RA, RB, RC, RD, RT0, RT1);
ret;
+ CFI_ENDPROC();
ELF(.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;)
#endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
diff --git a/mpi/aarch64/mpih-add1.S b/mpi/aarch64/mpih-add1.S
index 3370320e0..bc62cf987 100644
--- a/mpi/aarch64/mpih-add1.S
+++ b/mpi/aarch64/mpih-add1.S
@@ -37,6 +37,7 @@
.globl _gcry_mpih_add_n
ELF(.type _gcry_mpih_add_n,%function)
_gcry_mpih_add_n:
+ CFI_STARTPROC()
and w5, w3, #3;
adds xzr, xzr, xzr; /* clear carry flag */
@@ -69,4 +70,5 @@ _gcry_mpih_add_n:
.Lend:
adc x0, xzr, xzr;
ret;
+ CFI_ENDPROC()
ELF(.size _gcry_mpih_add_n,.-_gcry_mpih_add_n;)
diff --git a/mpi/aarch64/mpih-mul1.S b/mpi/aarch64/mpih-mul1.S
index 8830845a7..92fcd141b 100644
--- a/mpi/aarch64/mpih-mul1.S
+++ b/mpi/aarch64/mpih-mul1.S
@@ -37,6 +37,7 @@
.globl _gcry_mpih_mul_1
ELF(.type _gcry_mpih_mul_1,%function)
_gcry_mpih_mul_1:
+ CFI_STARTPROC()
and w5, w2, #3;
mov x4, xzr;
@@ -94,4 +95,5 @@ _gcry_mpih_mul_1:
.Lend:
mov x0, x4;
ret;
+ CFI_ENDPROC()
ELF(.size _gcry_mpih_mul_1,.-_gcry_mpih_mul_1;)
diff --git a/mpi/aarch64/mpih-mul2.S b/mpi/aarch64/mpih-mul2.S
index 5d736990e..aa0e5a2d5 100644
--- a/mpi/aarch64/mpih-mul2.S
+++ b/mpi/aarch64/mpih-mul2.S
@@ -37,6 +37,7 @@
.globl _gcry_mpih_addmul_1
ELF(.type _gcry_mpih_addmul_1,%function)
_gcry_mpih_addmul_1:
+ CFI_STARTPROC()
and w5, w2, #3;
mov x6, xzr;
mov x7, xzr;
@@ -106,4 +107,5 @@ _gcry_mpih_addmul_1:
.Lend:
mov x0, x6;
ret;
+ CFI_ENDPROC()
ELF(.size _gcry_mpih_addmul_1,.-_gcry_mpih_addmul_1;)
diff --git a/mpi/aarch64/mpih-mul3.S b/mpi/aarch64/mpih-mul3.S
index f785e5e42..5a40b354c 100644
--- a/mpi/aarch64/mpih-mul3.S
+++ b/mpi/aarch64/mpih-mul3.S
@@ -37,6 +37,7 @@
.globl _gcry_mpih_submul_1
ELF(.type _gcry_mpih_submul_1,%function)
_gcry_mpih_submul_1:
+ CFI_STARTPROC()
and w5, w2, #3;
mov x7, xzr;
cbz w5, .Large_loop;
@@ -119,4 +120,5 @@ _gcry_mpih_submul_1:
.Loop_end:
cinc x0, x7, cc;
ret;
+ CFI_ENDPROC()
ELF(.size _gcry_mpih_submul_1,.-_gcry_mpih_submul_1;)
diff --git a/mpi/aarch64/mpih-sub1.S b/mpi/aarch64/mpih-sub1.S
index 45a7b0417..4f279a123 100644
--- a/mpi/aarch64/mpih-sub1.S
+++ b/mpi/aarch64/mpih-sub1.S
@@ -37,6 +37,7 @@
.globl _gcry_mpih_sub_n
ELF(.type _gcry_mpih_sub_n,%function)
_gcry_mpih_sub_n:
+ CFI_STARTPROC()
and w5, w3, #3;
subs xzr, xzr, xzr; /* prepare carry flag for sub */
@@ -69,4 +70,5 @@ _gcry_mpih_sub_n:
.Lend:
cset x0, cc;
ret;
+ CFI_ENDPROC()
ELF(.size _gcry_mpih_sub_n,.-_gcry_mpih_sub_n;)
diff --git a/mpi/asm-common-aarch64.h b/mpi/asm-common-aarch64.h
index 126941307..cf4bdb852 100644
--- a/mpi/asm-common-aarch64.h
+++ b/mpi/asm-common-aarch64.h
@@ -21,10 +21,6 @@
#ifndef MPI_ASM_COMMON_AARCH64_H
#define MPI_ASM_COMMON_AARCH64_H
-#ifdef __ELF__
-# define ELF(...) __VA_ARGS__
-#else
-# define ELF(...) /*_*/
-#endif
+#include "../cipher/asm-common-aarch64.h"
#endif /* MPI_ASM_COMMON_AARCH64_H */