[PATCH] Add stitched ChaCha20-Poly1305 ARMv8/AArch64 implementation

Jussi Kivilinna jussi.kivilinna at iki.fi
Tue Sep 24 23:24:53 CEST 2019


* cipher/Makefile.am: Add 'asm-poly1305-aarch64.h'.
* cipher/asm-poly1305-aarch64.h: New.
* cipher/chacha20-aarch64.S (ROT8, _, ROTATE2_8): New.
(ROTATE2): Add interleave operator.
(QUARTERROUND2): Add interleave operators; Use ROTATE2_8.
(chacha20_data): Rename to...
(_gcry_chacha20_aarch64_blocks4_data_inc_counter): ...this.
(_gcry_chacha20_aarch64_blocks4_data_rot8): New.
(_gcry_chacha20_aarch64_blocks4): Preload ROT8; Fill empty parameters
for QUARTERROUND2 interleave operators.
(_gcry_chacha20_poly1305_aarch64_blocks4): New.
* cipher/chacha20.c
[USE_AARCH64_SIMD] (_gcry_chacha20_poly1305_aarch64_blocks4): New.
(_gcry_chacha20_poly1305_encrypt, _gcry_chacha20_poly1305_decrypt)
[USE_AARCH64_SIMD]: Use stitched implementation if ctx->use_neon is
set.
--

The patch also makes a small tweak to the regular ARMv8/AArch64 ChaCha20
implementation: the 'rotate by 8' operation now uses a 'tbl' byte
permutation instead of a shift/insert pair.
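
As an illustration of the tweak, here is an equivalent of the two rotation
variants written in ACLE NEON intrinsics (written for this note, not taken
from the patch; the helper names are made up):

  #include <arm_neon.h>

  /* Old path: rotate each 32-bit lane left by 8 with a shift + insert
   * pair (shl + sri), i.e. what ROTATE2 did for c == 8. */
  static inline uint32x4_t rotl32x4_8_shift (uint32x4_t x)
  {
    return vsriq_n_u32 (vshlq_n_u32 (x, 8), x, 24);
  }

  /* New path: the same rotation as a single byte permutation (tbl),
   * using the same index table as
   * _gcry_chacha20_aarch64_blocks4_data_rot8. */
  static inline uint32x4_t rotl32x4_8_tbl (uint32x4_t x)
  {
    static const uint8_t rot8[16] =
      { 3, 0, 1, 2,  7, 4, 5, 6,  11, 8, 9, 10,  15, 12, 13, 14 };
    return vreinterpretq_u32_u8 (
             vqtbl1q_u8 (vreinterpretq_u8_u32 (x), vld1q_u8 (rot8)));
  }

The shorter sequence is where the ~6% STREAM improvement below comes from,
and ROTATE2_8 (like the other rotate macros) takes an interleave operand so
a Poly1305 instruction can be scheduled between the two 'tbl' instructions.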

Benchmark on Cortex-A53 @ 1104 MHz:

Before:
 CHACHA20       |  nanosecs/byte   mebibytes/sec   cycles/byte
     STREAM enc |      4.93 ns/B     193.5 MiB/s      5.44 c/B
     STREAM dec |      4.93 ns/B     193.6 MiB/s      5.44 c/B
   POLY1305 enc |      7.71 ns/B     123.7 MiB/s      8.51 c/B
   POLY1305 dec |      7.70 ns/B     123.8 MiB/s      8.50 c/B
  POLY1305 auth |      2.77 ns/B     343.7 MiB/s      3.06 c/B

After (chacha20 ~6% faster, chacha20-poly1305 ~29% faster):
 CHACHA20       |  nanosecs/byte   mebibytes/sec   cycles/byte
     STREAM enc |      4.65 ns/B     205.2 MiB/s      5.13 c/B
     STREAM dec |      4.65 ns/B     205.1 MiB/s      5.13 c/B
   POLY1305 enc |      5.97 ns/B     159.7 MiB/s      6.59 c/B
   POLY1305 dec |      5.92 ns/B     161.1 MiB/s      6.54 c/B
  POLY1305 auth |      2.78 ns/B     343.3 MiB/s      3.07 c/B
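
For reference, here is a plain C sketch of the per-block arithmetic that the
POLY1305_BLOCK_PART1..29 macros in the new asm-poly1305-aarch64.h implement,
using the same 2x64-bit limb layout plus a small third limb. This is an
illustration written for this note, not code from the patch, and it assumes
a compiler with unsigned __int128:

  #include <stdint.h>
  #include <string.h>

  typedef unsigned __int128 u128;

  /* h[] = {h0, h1, h2}, r[] = {r0, r1} with r already clamped
   * (so r1 is a multiple of 4). */
  static void poly1305_block (uint64_t h[3], const uint64_t r[2],
                              const uint8_t m[16])
  {
    uint64_t r0 = r[0], r1 = r[1];
    uint64_t r1_mul5 = r1 + (r1 >> 2);  /* == (r1 / 4) * 5 */
    uint64_t m0, m1, h0, h1, h2, t1, t2, fold;
    u128 x0, x1, t;

    memcpy (&m0, m + 0, 8);  /* message words are little endian */
    memcpy (&m1, m + 8, 8);

    /* h += m, plus the 2^128 padding bit (PART1..7) */
    t  = (u128)h[0] + m0;             h0 = (uint64_t)t;
    t  = (u128)h[1] + m1 + (t >> 64); h1 = (uint64_t)t;
    h2 = h[2] + 1 + (uint64_t)(t >> 64);

    /* h *= r, partially reduced mod 2^130-5 (PART8..23); terms at
     * 2^128 and above are folded down using 2^130 == 5 (mod p). */
    x0 = (u128)h0 * r0 + (u128)h1 * r1_mul5;    /* coefficient of 2^0  */
    x1 = (u128)h0 * r1 + (u128)h1 * r0;         /* coefficient of 2^64 */
    t  = (u128)(h2 * r1_mul5) + (uint64_t)x1;   /* h2*r1*2^192 folded  */
    t1 = (uint64_t)t;
    t2 = h2 * r0 + (uint64_t)(x1 >> 64) + (uint64_t)(t >> 64);

    /* final carry propagation (PART24..29):
     * t2*2^128 == (t2 & 3)*2^128 + (t2 >> 2)*5 (mod 2^130-5) */
    h[2] = t2 & 3;
    fold = (t2 >> 2) * 5;
    t    = (u128)fold + (uint64_t)x0;                    h[0] = (uint64_t)t;
    t    = (u128)t1 + (uint64_t)(x0 >> 64) + (t >> 64);  h[1] = (uint64_t)t;
    h[2] += (uint64_t)(t >> 64);
  }

In the stitched assembly these steps are split into 29 short macros so that
the scalar Poly1305 instructions can be dropped one at a time between the
NEON instructions of the ChaCha20 quarter rounds.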

Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
 0 files changed
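
To make the control flow of the chacha20.c changes easier to follow, here is
a condensed sketch of the new AArch64 branch in the encrypt path (simplified
from the hunks below, not verbatim; the other SIMD paths and the tail and
catch-up handling are omitted). Four blocks are first encrypted with the
plain function so that there is ciphertext to authenticate; the stitched
function then authenticates that lagging ciphertext while producing the next
blocks:

  if (ctx->use_neon && length >= 4 * CHACHA20_BLOCK_SIZE)
    {
      /* Lead-in: plain 4-block pass, leaves authptr 4 blocks behind. */
      nburn = _gcry_chacha20_aarch64_blocks4 (ctx->input, outbuf, inbuf, 4);
      burn = nburn > burn ? nburn : burn;
      authptr = outbuf;
      length -= 4 * CHACHA20_BLOCK_SIZE;
      outbuf += 4 * CHACHA20_BLOCK_SIZE;
      inbuf  += 4 * CHACHA20_BLOCK_SIZE;

      /* Bulk: encrypt block n while authenticating block n-4. */
      if (length >= 4 * CHACHA20_BLOCK_SIZE)
        {
          size_t nblocks = length / CHACHA20_BLOCK_SIZE;
          nblocks -= nblocks % 4;

          nburn = _gcry_chacha20_poly1305_aarch64_blocks4 (
                      ctx->input, outbuf, inbuf, nblocks,
                      &c->u_mode.poly1305.ctx.state, authptr);
          burn = nburn > burn ? nburn : burn;

          length  -= nblocks * CHACHA20_BLOCK_SIZE;
          outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
          inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
          authptr += nblocks * CHACHA20_BLOCK_SIZE;
        }
    }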

diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index bf13c199a..dc63a736f 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -70,8 +70,9 @@ libcipher_la_SOURCES = \
 	sha1.h
 
 EXTRA_libcipher_la_SOURCES = \
-	asm-common-amd64.h \
 	asm-common-aarch64.h \
+	asm-common-amd64.h \
+	asm-poly1305-aarch64.h \
 	asm-poly1305-amd64.h \
 	arcfour.c arcfour-amd64.S \
 	blowfish.c blowfish-amd64.S blowfish-arm.S \
diff --git a/cipher/asm-poly1305-aarch64.h b/cipher/asm-poly1305-aarch64.h
new file mode 100644
index 000000000..6c342bee7
--- /dev/null
+++ b/cipher/asm-poly1305-aarch64.h
@@ -0,0 +1,245 @@
+/* asm-poly1305-aarch64.h  -  Poly1305 macros for ARMv8/AArch64 assembly
+ *
+ * Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GCRY_ASM_POLY1305_AARCH64_H
+#define GCRY_ASM_POLY1305_AARCH64_H
+
+#include "asm-common-aarch64.h"
+
+#ifdef __AARCH64EL__
+  #define le_to_host(reg) /*_*/
+#else
+  #define le_to_host(reg) rev reg, reg;
+#endif
+
+/**********************************************************************
+  poly1305 for stitched chacha20-poly1305 AArch64 implementations
+ **********************************************************************/
+
+#define POLY_RSTATE    x8
+#define POLY_RSRC      x9
+
+#define POLY_R_H0      x10
+#define POLY_R_H1      x11
+#define POLY_R_H2      x12
+#define POLY_R_H2d     w12
+#define POLY_R_R0      x13
+#define POLY_R_R1      x14
+#define POLY_R_R1_MUL5 x15
+#define POLY_R_X0_HI   x16
+#define POLY_R_X0_LO   x17
+#define POLY_R_X1_HI   x19
+#define POLY_R_X1_LO   x20
+#define POLY_R_ONE     x21
+#define POLY_R_ONEd    w21
+
+#define POLY_TMP0      x22
+#define POLY_TMP1      x23
+#define POLY_TMP2      x24
+#define POLY_TMP3      x25
+
+#define POLY_CHACHA_ROUND x26
+
+#define POLY_S_R0      (4 * 4 + 0 * 8)
+#define POLY_S_R1      (4 * 4 + 1 * 8)
+#define POLY_S_H0      (4 * 4 + 2 * 8 + 0 * 8)
+#define POLY_S_H1      (4 * 4 + 2 * 8 + 1 * 8)
+#define POLY_S_H2d     (4 * 4 + 2 * 8 + 2 * 8)
+
+#define POLY1305_PUSH_REGS() \
+	stp x19, x20, [sp, #-16]!; \
+	CFI_ADJUST_CFA_OFFSET(16); \
+	CFI_REG_ON_STACK(19, 0); \
+	CFI_REG_ON_STACK(20, 8); \
+	stp x21, x22, [sp, #-16]!; \
+	CFI_ADJUST_CFA_OFFSET(16); \
+	CFI_REG_ON_STACK(21, 0); \
+	CFI_REG_ON_STACK(22, 8); \
+	stp x23, x24, [sp, #-16]!; \
+	CFI_ADJUST_CFA_OFFSET(16); \
+	CFI_REG_ON_STACK(23, 0); \
+	CFI_REG_ON_STACK(24, 8); \
+	stp x25, x26, [sp, #-16]!; \
+	CFI_ADJUST_CFA_OFFSET(16); \
+	CFI_REG_ON_STACK(25, 0); \
+	CFI_REG_ON_STACK(26, 8);
+
+#define POLY1305_POP_REGS() \
+	ldp x25, x26, [sp], #16; \
+	CFI_ADJUST_CFA_OFFSET(-16); \
+	CFI_RESTORE(x25); \
+	CFI_RESTORE(x26); \
+	ldp x23, x24, [sp], #16; \
+	CFI_ADJUST_CFA_OFFSET(-16); \
+	CFI_RESTORE(x23); \
+	CFI_RESTORE(x24); \
+	ldp x21, x22, [sp], #16; \
+	CFI_ADJUST_CFA_OFFSET(-16); \
+	CFI_RESTORE(x21); \
+	CFI_RESTORE(x22); \
+	ldp x19, x20, [sp], #16; \
+	CFI_ADJUST_CFA_OFFSET(-16); \
+	CFI_RESTORE(x19); \
+	CFI_RESTORE(x20);
+
+#define POLY1305_LOAD_STATE() \
+	ldr POLY_R_R1, [POLY_RSTATE, #(POLY_S_R1)]; \
+	ldr POLY_R_H0, [POLY_RSTATE, #(POLY_S_H0)];  \
+	ldr POLY_R_H1, [POLY_RSTATE, #(POLY_S_H1)]; \
+	ldr POLY_R_H2d, [POLY_RSTATE, #(POLY_S_H2d)]; \
+	ldr POLY_R_R0, [POLY_RSTATE, #(POLY_S_R0)]; \
+	add POLY_R_R1_MUL5, POLY_R_R1, POLY_R_R1, lsr #2; \
+	mov POLY_R_ONE, #1;
+
+#define POLY1305_STORE_STATE() \
+	str POLY_R_H0, [POLY_RSTATE, #(POLY_S_H0)]; \
+	str POLY_R_H1, [POLY_RSTATE, #(POLY_S_H1)]; \
+	str POLY_R_H2d, [POLY_RSTATE, #(POLY_S_H2d)];
+
+#define POLY1305_BLOCK_PART1(src_offset) \
+	/* a = h + m */ \
+	ldr POLY_TMP0, [POLY_RSRC, #((src_offset) + 0 * 8)];
+#define POLY1305_BLOCK_PART2(src_offset) \
+	ldr POLY_TMP1, [POLY_RSRC, #((src_offset) + 1 * 8)];
+#define POLY1305_BLOCK_PART3() \
+	le_to_host(POLY_TMP0);
+#define POLY1305_BLOCK_PART4() \
+	le_to_host(POLY_TMP1);
+#define POLY1305_BLOCK_PART5() \
+	adds POLY_R_H0, POLY_R_H0, POLY_TMP0;
+#define POLY1305_BLOCK_PART6() \
+	adcs POLY_R_H1, POLY_R_H1, POLY_TMP1;
+#define POLY1305_BLOCK_PART7() \
+	adc POLY_R_H2d, POLY_R_H2d, POLY_R_ONEd;
+
+#define POLY1305_BLOCK_PART8() \
+	/* h = a * r (partial mod 2^130-5): */ \
+	mul POLY_R_X1_LO, POLY_R_H0, POLY_R_R1;   /* lo: h0 * r1 */
+#define POLY1305_BLOCK_PART9() \
+	mul POLY_TMP0, POLY_R_H1, POLY_R_R0;      /* lo: h1 * r0 */
+#define POLY1305_BLOCK_PART10() \
+	mul POLY_R_X0_LO, POLY_R_H0, POLY_R_R0;   /* lo: h0 * r0 */
+#define POLY1305_BLOCK_PART11() \
+	umulh POLY_R_X1_HI, POLY_R_H0, POLY_R_R1; /* hi: h0 * r1 */
+#define POLY1305_BLOCK_PART12() \
+	adds POLY_R_X1_LO, POLY_R_X1_LO, POLY_TMP0;
+#define POLY1305_BLOCK_PART13() \
+	umulh POLY_TMP1, POLY_R_H1, POLY_R_R0;    /* hi: h1 * r0 */
+#define POLY1305_BLOCK_PART14() \
+	mul POLY_TMP2, POLY_R_H1, POLY_R_R1_MUL5;   /* lo: h1 * r1 mod 2^130-5 */
+#define POLY1305_BLOCK_PART15() \
+	umulh POLY_R_X0_HI, POLY_R_H0, POLY_R_R0; /* hi: h0 * r0 */
+#define POLY1305_BLOCK_PART16() \
+	adc POLY_R_X1_HI, POLY_R_X1_HI, POLY_TMP1;
+#define POLY1305_BLOCK_PART17() \
+	umulh POLY_TMP3, POLY_R_H1, POLY_R_R1_MUL5; /* hi: h1 * r1 mod 2^130-5 */
+#define POLY1305_BLOCK_PART18() \
+	adds POLY_R_X0_LO, POLY_R_X0_LO, POLY_TMP2;
+#define POLY1305_BLOCK_PART19() \
+	mul POLY_R_H1, POLY_R_H2, POLY_R_R1_MUL5; /* h2 * r1 mod 2^130-5 */
+#define POLY1305_BLOCK_PART20() \
+	adc POLY_R_X0_HI, POLY_R_X0_HI, POLY_TMP3;
+#define POLY1305_BLOCK_PART21() \
+	mul POLY_R_H2, POLY_R_H2, POLY_R_R0;      /* h2 * r0 */
+#define POLY1305_BLOCK_PART22() \
+	adds POLY_R_H1, POLY_R_H1, POLY_R_X1_LO;
+#define POLY1305_BLOCK_PART23() \
+	adc POLY_R_H0, POLY_R_H2, POLY_R_X1_HI;
+
+#define POLY1305_BLOCK_PART24() \
+	/* carry propagation */ \
+	and POLY_R_H2, POLY_R_H0, #3;
+#define POLY1305_BLOCK_PART25() \
+	mov POLY_R_H0, POLY_R_H0, lsr #2;
+#define POLY1305_BLOCK_PART26() \
+	add POLY_R_H0, POLY_R_H0, POLY_R_H0, lsl #2;
+#define POLY1305_BLOCK_PART27() \
+	adds POLY_R_H0, POLY_R_H0, POLY_R_X0_LO;
+#define POLY1305_BLOCK_PART28() \
+	adcs POLY_R_H1, POLY_R_H1, POLY_R_X0_HI;
+#define POLY1305_BLOCK_PART29() \
+	adc POLY_R_H2d, POLY_R_H2d, wzr;
+
+//#define TESTING_POLY1305_ASM
+#ifdef TESTING_POLY1305_ASM
+/* for testing only. */
+.align 3
+.globl _gcry_poly1305_aarch64_blocks1
+ELF(.type _gcry_poly1305_aarch64_blocks1,%function;)
+_gcry_poly1305_aarch64_blocks1:
+	/* input:
+	 *	x0: poly1305-state
+	 *	x1: src
+	 *	x2: nblks
+	 */
+	CFI_STARTPROC()
+	POLY1305_PUSH_REGS();
+
+	mov POLY_RSTATE, x0;
+	mov POLY_RSRC, x1;
+
+	POLY1305_LOAD_STATE();
+
+.L_gcry_poly1305_aarch64_loop1:
+	POLY1305_BLOCK_PART1(0 * 16);
+	POLY1305_BLOCK_PART2(0 * 16);
+	add POLY_RSRC, POLY_RSRC, #16;
+	POLY1305_BLOCK_PART3();
+	POLY1305_BLOCK_PART4();
+	POLY1305_BLOCK_PART5();
+	POLY1305_BLOCK_PART6();
+	POLY1305_BLOCK_PART7();
+	POLY1305_BLOCK_PART8();
+	POLY1305_BLOCK_PART9();
+	POLY1305_BLOCK_PART10();
+	POLY1305_BLOCK_PART11();
+	POLY1305_BLOCK_PART12();
+	POLY1305_BLOCK_PART13();
+	POLY1305_BLOCK_PART14();
+	POLY1305_BLOCK_PART15();
+	POLY1305_BLOCK_PART16();
+	POLY1305_BLOCK_PART17();
+	POLY1305_BLOCK_PART18();
+	POLY1305_BLOCK_PART19();
+	POLY1305_BLOCK_PART20();
+	POLY1305_BLOCK_PART21();
+	POLY1305_BLOCK_PART22();
+	POLY1305_BLOCK_PART23();
+	POLY1305_BLOCK_PART24();
+	POLY1305_BLOCK_PART25();
+	POLY1305_BLOCK_PART26();
+	POLY1305_BLOCK_PART27();
+	POLY1305_BLOCK_PART28();
+	POLY1305_BLOCK_PART29();
+
+	subs x2, x2, #1;
+	b.ne .L_gcry_poly1305_aarch64_loop1;
+
+	POLY1305_STORE_STATE();
+
+	mov x0, #0;
+
+	POLY1305_POP_REGS();
+	ret;
+	CFI_ENDPROC()
+ELF(.size _gcry_poly1305_aarch64_blocks1, .-_gcry_poly1305_aarch64_blocks1;)
+#endif
+
+#endif /* GCRY_ASM_POLY1305_AARCH64_H */
diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index 07b4bb5c0..7ace023fb 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -1,6 +1,6 @@
 /* chacha20-aarch64.S - ARMv8/AArch64 accelerated chacha20 blocks function
  *
- * Copyright (C) 2017,2018 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ * Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna at iki.fi>
  *
  * This file is part of Libgcrypt.
  *
@@ -38,6 +38,7 @@
 
 .text
 
+#include "asm-poly1305-aarch64.h"
 
 /* register macros */
 #define INPUT     x0
@@ -74,11 +75,14 @@
 #define VTMP3   v4
 #define X12_TMP v5
 #define X13_TMP v6
+#define ROT8    v7
 
 /**********************************************************************
   helper macros
  **********************************************************************/
 
+#define _(...) __VA_ARGS__
+
 #define vpunpckldq(s1, s2, dst) \
 	zip1 dst.4s, s2.4s, s1.4s;
 
@@ -112,12 +116,18 @@
   4-way chacha20
  **********************************************************************/
 
-#define ROTATE2(dst1,dst2,c,src1,src2)		\
+#define ROTATE2(dst1,dst2,c,src1,src2,iop1)	\
 	shl dst1.4s, src1.4s, #(c);		\
 	shl dst2.4s, src2.4s, #(c);		\
+	iop1;					\
 	sri dst1.4s, src1.4s, #(32 - (c));	\
 	sri dst2.4s, src2.4s, #(32 - (c));
 
+#define ROTATE2_8(dst1,dst2,src1,src2,iop1)	\
+	tbl dst1.16b, {src1.16b}, ROT8.16b;     \
+	iop1;					\
+	tbl dst2.16b, {src2.16b}, ROT8.16b;
+
 #define ROTATE2_16(dst1,dst2,src1,src2)		\
 	rev32 dst1.8h, src1.8h;			\
 	rev32 dst2.8h, src2.8h;
@@ -128,21 +138,33 @@
 #define PLUS(ds,s) \
 	add ds.4s, ds.4s, s.4s;
 
-#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2)		\
-	PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2);	\
-	    ROTATE2_16(d1, d2, tmp1, tmp2);				\
-	PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2);	\
-	    ROTATE2(b1, b2, 12, tmp1, tmp2);				\
-	PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2);	\
-	    ROTATE2(d1, d2,  8, tmp1, tmp2);				\
-	PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2);	\
-	    ROTATE2(b1, b2,  7, tmp1, tmp2);
-
-chacha20_data:
+#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2,iop1,iop2,iop3,iop4,iop5,iop6,iop7,iop8,iop9,iop10,iop11,iop12,iop13,iop14) \
+	PLUS(a1,b1); PLUS(a2,b2); iop1;					\
+	    XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop2;			\
+		ROTATE2_16(d1, d2, tmp1, tmp2); iop3;			\
+	PLUS(c1,d1); PLUS(c2,d2); iop4;					\
+	    XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop5;			\
+		ROTATE2(b1, b2, 12, tmp1, tmp2, _(iop6)); iop7;		\
+	PLUS(a1,b1); PLUS(a2,b2); iop8;					\
+	    XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop9;			\
+		ROTATE2_8(d1, d2, tmp1, tmp2, _(iop10)); iop11;		\
+	PLUS(c1,d1); PLUS(c2,d2); iop12;				\
+	    XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop13;			\
+		ROTATE2(b1, b2,  7, tmp1, tmp2, _(iop14));
+
 .align 4
-.Linc_counter:
+.globl _gcry_chacha20_aarch64_blocks4_data_inc_counter
+_gcry_chacha20_aarch64_blocks4_data_inc_counter:
 	.long 0,1,2,3
 
+.align 4
+.globl _gcry_chacha20_aarch64_blocks4_data_rot8
+_gcry_chacha20_aarch64_blocks4_data_rot8:
+	.byte 3,0,1,2
+	.byte 7,4,5,6
+	.byte 11,8,9,10
+	.byte 15,12,13,14
+
 .align 3
 .globl _gcry_chacha20_aarch64_blocks4
 ELF(.type _gcry_chacha20_aarch64_blocks4,%function;)
@@ -156,8 +178,10 @@ _gcry_chacha20_aarch64_blocks4:
 	 */
 	CFI_STARTPROC()
 
-	GET_DATA_POINTER(CTR, .Linc_counter);
+	GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8);
 	add INPUT_CTR, INPUT, #(12*4);
+	ld1 {ROT8.16b}, [CTR];
+	GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_inc_counter);
 	mov INPUT_POS, INPUT;
 	ld1 {VCTR.16b}, [CTR];
 
@@ -195,10 +219,14 @@ _gcry_chacha20_aarch64_blocks4:
 
 .Lround2:
 	subs ROUND, ROUND, #2
-	QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,VTMP0,VTMP1)
-	QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,VTMP0,VTMP1)
-	QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,VTMP0,VTMP1)
-	QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,VTMP0,VTMP1)
+	QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,VTMP0,VTMP1,
+		      ,,,,,,,,,,,,,)
+	QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,
+		      ,,,,,,,,,,,,,)
+	QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,VTMP0,VTMP1,
+		      ,,,,,,,,,,,,,)
+	QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,VTMP0,VTMP1,
+		      ,,,,,,,,,,,,,)
 	b.ne .Lround2;
 
 	ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32;
@@ -304,4 +332,285 @@ _gcry_chacha20_aarch64_blocks4:
 	CFI_ENDPROC()
 ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;)
 
+/**********************************************************************
+  4-way stitched chacha20-poly1305
+ **********************************************************************/
+
+.align 3
+.globl _gcry_chacha20_poly1305_aarch64_blocks4
+ELF(.type _gcry_chacha20_poly1305_aarch64_blocks4,%function;)
+
+_gcry_chacha20_poly1305_aarch64_blocks4:
+	/* input:
+	 *	x0: input
+	 *	x1: dst
+	 *	x2: src
+	 *	x3: nblks (multiple of 4)
+	 *	x4: poly1305-state
+	 *	x5: poly1305-src
+	 */
+	CFI_STARTPROC()
+	POLY1305_PUSH_REGS()
+
+	mov POLY_RSTATE, x4;
+	mov POLY_RSRC, x5;
+
+	GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8);
+	add INPUT_CTR, INPUT, #(12*4);
+	ld1 {ROT8.16b}, [CTR];
+	GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_inc_counter);
+	mov INPUT_POS, INPUT;
+	ld1 {VCTR.16b}, [CTR];
+
+	POLY1305_LOAD_STATE()
+
+.Loop_poly4:
+	/* Construct counter vectors X12 and X13 */
+
+	ld1 {X15.16b}, [INPUT_CTR];
+	ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS];
+
+	dup X12.4s, X15.s[0];
+	dup X13.4s, X15.s[1];
+	ldr CTR, [INPUT_CTR];
+	add X12.4s, X12.4s, VCTR.4s;
+	dup X0.4s, VTMP1.s[0];
+	dup X1.4s, VTMP1.s[1];
+	dup X2.4s, VTMP1.s[2];
+	dup X3.4s, VTMP1.s[3];
+	dup X14.4s, X15.s[2];
+	cmhi VTMP0.4s, VCTR.4s, X12.4s;
+	dup X15.4s, X15.s[3];
+	add CTR, CTR, #4; /* Update counter */
+	dup X4.4s, VTMP2.s[0];
+	dup X5.4s, VTMP2.s[1];
+	dup X6.4s, VTMP2.s[2];
+	dup X7.4s, VTMP2.s[3];
+	sub X13.4s, X13.4s, VTMP0.4s;
+	dup X8.4s, VTMP3.s[0];
+	dup X9.4s, VTMP3.s[1];
+	dup X10.4s, VTMP3.s[2];
+	dup X11.4s, VTMP3.s[3];
+	mov X12_TMP.16b, X12.16b;
+	mov X13_TMP.16b, X13.16b;
+	str CTR, [INPUT_CTR];
+
+	mov ROUND, #20
+.Lround4_with_poly1305_outer:
+	mov POLY_CHACHA_ROUND, #6;
+.Lround4_with_poly1305_inner1:
+		      POLY1305_BLOCK_PART1(0 * 16)
+	QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,VTMP0,VTMP1,
+		      POLY1305_BLOCK_PART2(0 * 16),
+		      POLY1305_BLOCK_PART3(),
+		      POLY1305_BLOCK_PART4(),
+		      POLY1305_BLOCK_PART5(),
+		      POLY1305_BLOCK_PART6(),
+		      POLY1305_BLOCK_PART7(),
+		      POLY1305_BLOCK_PART8(),
+		      POLY1305_BLOCK_PART9(),
+		      POLY1305_BLOCK_PART10(),
+		      POLY1305_BLOCK_PART11(),
+		      POLY1305_BLOCK_PART12(),
+		      POLY1305_BLOCK_PART13(),
+		      POLY1305_BLOCK_PART14(),
+		      POLY1305_BLOCK_PART15())
+		      POLY1305_BLOCK_PART16()
+	QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,
+		      POLY1305_BLOCK_PART17(),
+		      POLY1305_BLOCK_PART18(),
+		      POLY1305_BLOCK_PART19(),
+		      POLY1305_BLOCK_PART20(),
+		      POLY1305_BLOCK_PART21(),
+		      POLY1305_BLOCK_PART22(),
+		      POLY1305_BLOCK_PART23(),
+		      POLY1305_BLOCK_PART24(),
+		      POLY1305_BLOCK_PART25(),
+		      POLY1305_BLOCK_PART26(),
+		      POLY1305_BLOCK_PART27(),
+		      POLY1305_BLOCK_PART28(),
+		      POLY1305_BLOCK_PART29(),
+		      POLY1305_BLOCK_PART1(1 * 16))
+		      POLY1305_BLOCK_PART2(1 * 16)
+	QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,VTMP0,VTMP1,
+		      _(add POLY_RSRC, POLY_RSRC, #(2*16)),
+		      POLY1305_BLOCK_PART3(),
+		      POLY1305_BLOCK_PART4(),
+		      POLY1305_BLOCK_PART5(),
+		      POLY1305_BLOCK_PART6(),
+		      POLY1305_BLOCK_PART7(),
+		      POLY1305_BLOCK_PART8(),
+		      POLY1305_BLOCK_PART9(),
+		      POLY1305_BLOCK_PART10(),
+		      POLY1305_BLOCK_PART11(),
+		      POLY1305_BLOCK_PART12(),
+		      POLY1305_BLOCK_PART13(),
+		      POLY1305_BLOCK_PART14(),
+		      POLY1305_BLOCK_PART15())
+		      POLY1305_BLOCK_PART16()
+	QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,VTMP0,VTMP1,
+		      POLY1305_BLOCK_PART17(),
+		      POLY1305_BLOCK_PART18(),
+		      POLY1305_BLOCK_PART19(),
+		      POLY1305_BLOCK_PART20(),
+		      POLY1305_BLOCK_PART21(),
+		      POLY1305_BLOCK_PART22(),
+		      POLY1305_BLOCK_PART23(),
+		      POLY1305_BLOCK_PART24(),
+		      POLY1305_BLOCK_PART25(),
+		      POLY1305_BLOCK_PART26(),
+		      POLY1305_BLOCK_PART27(),
+		      POLY1305_BLOCK_PART28(),
+		      POLY1305_BLOCK_PART29(),
+		      _(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2));
+	b.ne .Lround4_with_poly1305_inner1;
+
+	mov POLY_CHACHA_ROUND, #4;
+.Lround4_with_poly1305_inner2:
+		      POLY1305_BLOCK_PART1(0 * 16)
+	QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,VTMP0,VTMP1,,
+		      POLY1305_BLOCK_PART2(0 * 16),,
+		      _(add POLY_RSRC, POLY_RSRC, #(1*16)),,
+		      POLY1305_BLOCK_PART3(),,
+		      POLY1305_BLOCK_PART4(),,
+		      POLY1305_BLOCK_PART5(),,
+		      POLY1305_BLOCK_PART6(),,
+		      POLY1305_BLOCK_PART7())
+	QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,
+		      POLY1305_BLOCK_PART8(),,
+		      POLY1305_BLOCK_PART9(),,
+		      POLY1305_BLOCK_PART10(),,
+		      POLY1305_BLOCK_PART11(),,
+		      POLY1305_BLOCK_PART12(),,
+		      POLY1305_BLOCK_PART13(),,
+		      POLY1305_BLOCK_PART14(),)
+		      POLY1305_BLOCK_PART15()
+	QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,VTMP0,VTMP1,,
+		      POLY1305_BLOCK_PART16(),,
+		      POLY1305_BLOCK_PART17(),,
+		      POLY1305_BLOCK_PART18(),,
+		      POLY1305_BLOCK_PART19(),,
+		      POLY1305_BLOCK_PART20(),,
+		      POLY1305_BLOCK_PART21(),,
+		      POLY1305_BLOCK_PART22())
+	QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,VTMP0,VTMP1,
+		      POLY1305_BLOCK_PART23(),,
+		      POLY1305_BLOCK_PART24(),,
+		      POLY1305_BLOCK_PART25(),,
+		      POLY1305_BLOCK_PART26(),,
+		      POLY1305_BLOCK_PART27(),,
+		      POLY1305_BLOCK_PART28(),,
+		      POLY1305_BLOCK_PART29(),
+		      _(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2))
+	b.ne .Lround4_with_poly1305_inner2;
+
+	subs ROUND, ROUND, #10
+	b.ne .Lround4_with_poly1305_outer;
+
+	ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32;
+
+	PLUS(X12, X12_TMP);        /* INPUT + 12 * 4 + counter */
+	PLUS(X13, X13_TMP);        /* INPUT + 13 * 4 + counter */
+
+	dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */
+	dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */
+	dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */
+	dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */
+	PLUS(X0, VTMP2);
+	PLUS(X1, VTMP3);
+	PLUS(X2, X12_TMP);
+	PLUS(X3, X13_TMP);
+
+	dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */
+	dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */
+	dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */
+	dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */
+	ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS];
+	mov INPUT_POS, INPUT;
+	PLUS(X4, VTMP2);
+	PLUS(X5, VTMP3);
+	PLUS(X6, X12_TMP);
+	PLUS(X7, X13_TMP);
+
+	dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */
+	dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */
+	dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */
+	dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */
+	dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */
+	dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */
+	PLUS(X8, VTMP2);
+	PLUS(X9, VTMP3);
+	PLUS(X10, X12_TMP);
+	PLUS(X11, X13_TMP);
+	PLUS(X14, VTMP0);
+	PLUS(X15, VTMP1);
+
+	transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2);
+	transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2);
+	transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2);
+	transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2);
+
+	subs NBLKS, NBLKS, #4;
+
+	ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
+	ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
+	eor VTMP0.16b, X0.16b, VTMP0.16b;
+	eor VTMP1.16b, X4.16b, VTMP1.16b;
+	eor VTMP2.16b, X8.16b, VTMP2.16b;
+	eor VTMP3.16b, X12.16b, VTMP3.16b;
+	eor X12_TMP.16b, X1.16b, X12_TMP.16b;
+	eor X13_TMP.16b, X5.16b, X13_TMP.16b;
+	st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
+	ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
+	st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
+	ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
+	eor VTMP0.16b, X9.16b, VTMP0.16b;
+	eor VTMP1.16b, X13.16b, VTMP1.16b;
+	eor VTMP2.16b, X2.16b, VTMP2.16b;
+	eor VTMP3.16b, X6.16b, VTMP3.16b;
+	eor X12_TMP.16b, X10.16b, X12_TMP.16b;
+	eor X13_TMP.16b, X14.16b, X13_TMP.16b;
+	st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
+	ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
+	st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
+	eor VTMP0.16b, X3.16b, VTMP0.16b;
+	eor VTMP1.16b, X7.16b, VTMP1.16b;
+	eor VTMP2.16b, X11.16b, VTMP2.16b;
+	eor VTMP3.16b, X15.16b, VTMP3.16b;
+	st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
+
+	b.ne .Loop_poly4;
+
+	POLY1305_STORE_STATE()
+
+	/* clear the used vector registers and stack */
+	clear(VTMP0);
+	clear(VTMP1);
+	clear(VTMP2);
+	clear(VTMP3);
+	clear(X12_TMP);
+	clear(X13_TMP);
+	clear(X0);
+	clear(X1);
+	clear(X2);
+	clear(X3);
+	clear(X4);
+	clear(X5);
+	clear(X6);
+	clear(X7);
+	clear(X8);
+	clear(X9);
+	clear(X10);
+	clear(X11);
+	clear(X12);
+	clear(X13);
+	clear(X14);
+	clear(X15);
+
+	eor x0, x0, x0
+	POLY1305_POP_REGS()
+	ret
+	CFI_ENDPROC()
+ELF(.size _gcry_chacha20_poly1305_aarch64_blocks4, .-_gcry_chacha20_poly1305_aarch64_blocks4;)
+
 #endif
diff --git a/cipher/chacha20.c b/cipher/chacha20.c
index b34d8d197..9d95723ba 100644
--- a/cipher/chacha20.c
+++ b/cipher/chacha20.c
@@ -185,6 +185,10 @@ unsigned int _gcry_chacha20_armv7_neon_blocks4(u32 *state, byte *dst,
 unsigned int _gcry_chacha20_aarch64_blocks4(u32 *state, byte *dst,
 					    const byte *src, size_t nblks);
 
+unsigned int _gcry_chacha20_poly1305_aarch64_blocks4(
+		u32 *state, byte *dst, const byte *src, size_t nblks,
+		void *poly1305_state, const byte *poly1305_src);
+
 #endif /* USE_AARCH64_SIMD */
 
 
@@ -688,6 +692,18 @@ _gcry_chacha20_poly1305_encrypt(gcry_cipher_hd_t c, byte *outbuf,
       inbuf  += 1 * CHACHA20_BLOCK_SIZE;
     }
 #endif
+#ifdef USE_AARCH64_SIMD
+  else if (ctx->use_neon && length >= CHACHA20_BLOCK_SIZE * 4)
+    {
+      nburn = _gcry_chacha20_aarch64_blocks4(ctx->input, outbuf, inbuf, 4);
+      burn = nburn > burn ? nburn : burn;
+
+      authptr = outbuf;
+      length -= 4 * CHACHA20_BLOCK_SIZE;
+      outbuf += 4 * CHACHA20_BLOCK_SIZE;
+      inbuf  += 4 * CHACHA20_BLOCK_SIZE;
+    }
+#endif
 #ifdef USE_PPC_VEC_POLY1305
   else if (ctx->use_ppc && length >= CHACHA20_BLOCK_SIZE * 4)
     {
@@ -763,6 +779,26 @@ _gcry_chacha20_poly1305_encrypt(gcry_cipher_hd_t c, byte *outbuf,
 	}
 #endif
 
+#ifdef USE_AARCH64_SIMD
+      if (ctx->use_neon &&
+	  length >= 4 * CHACHA20_BLOCK_SIZE &&
+	  authoffset >= 4 * CHACHA20_BLOCK_SIZE)
+	{
+	  size_t nblocks = length / CHACHA20_BLOCK_SIZE;
+	  nblocks -= nblocks % 4;
+
+	  nburn = _gcry_chacha20_poly1305_aarch64_blocks4(
+		      ctx->input, outbuf, inbuf, nblocks,
+		      &c->u_mode.poly1305.ctx.state, authptr);
+	  burn = nburn > burn ? nburn : burn;
+
+	  length  -= nblocks * CHACHA20_BLOCK_SIZE;
+	  outbuf  += nblocks * CHACHA20_BLOCK_SIZE;
+	  inbuf   += nblocks * CHACHA20_BLOCK_SIZE;
+	  authptr += nblocks * CHACHA20_BLOCK_SIZE;
+	}
+#endif
+
 #ifdef USE_PPC_VEC_POLY1305
       if (ctx->use_ppc &&
 	  length >= 4 * CHACHA20_BLOCK_SIZE &&
@@ -913,6 +949,23 @@ _gcry_chacha20_poly1305_decrypt(gcry_cipher_hd_t c, byte *outbuf,
     }
 #endif
 
+#ifdef USE_AARCH64_SIMD
+  if (ctx->use_neon && length >= 4 * CHACHA20_BLOCK_SIZE)
+    {
+      size_t nblocks = length / CHACHA20_BLOCK_SIZE;
+      nblocks -= nblocks % 4;
+
+      nburn = _gcry_chacha20_poly1305_aarch64_blocks4(
+			ctx->input, outbuf, inbuf, nblocks,
+			&c->u_mode.poly1305.ctx.state, inbuf);
+      burn = nburn > burn ? nburn : burn;
+
+      length -= nblocks * CHACHA20_BLOCK_SIZE;
+      outbuf += nblocks * CHACHA20_BLOCK_SIZE;
+      inbuf  += nblocks * CHACHA20_BLOCK_SIZE;
+    }
+#endif
+
 #ifdef USE_PPC_VEC_POLY1305
   if (ctx->use_ppc && length >= 4 * CHACHA20_BLOCK_SIZE)
     {



