[PATCH 2/3] chacha20: add SSSE3 assembly implementation

Jussi Kivilinna jussi.kivilinna@iki.fi
Sat May 3 19:26:04 CEST 2014


* cipher/Makefile.am: Add 'chacha20-ssse3-amd64.S'.
* cipher/chacha20-ssse3-amd64.S: New.
* cipher/chacha20.c (USE_SSSE3): New macro.
(CHACHA20_context_s): Add 'use_ssse3'.
[USE_SSSE3] (_gcry_chacha20_amd64_ssse3_blocks)
(chacha20_core_ssse3): New.
(chacha20_do_setkey): Check HW features for SSSE3.
(chacha20_do_encrypt_stream) [USE_SSSE3]: Use the SSSE3 optimized
implementation if enabled.
* configure.ac [host=x86-64]: Add 'chacha20-ssse3-amd64.lo'.
--

Add an SSSE3 optimized implementation of ChaCha20, based on the public
domain implementation by Andrew Moon.
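
Most of the gain comes from processing four 64-byte blocks in parallel
and from doing the 16-bit and 8-bit rotations of the quarter round with
a single SSSE3 pshufb each (the masks are the .LC constants at the end
of the new file), while the 12-bit and 7-bit rotations keep the usual
two-shift form.  For illustration only -- this is not part of the patch
and the helper names are made up -- a minimal C intrinsics sketch of one
such quarter round:

  #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

  /* Rotate each 32-bit lane left by c (used for the 12 and 7 cases). */
  static inline __m128i rotl32_shifts (__m128i x, int c)
  {
    return _mm_xor_si128 (_mm_slli_epi32 (x, c), _mm_srli_epi32 (x, 32 - c));
  }

  /* One quarter round over four lanes, i.e. over four blocks at once. */
  static inline void quarterround (__m128i *a, __m128i *b, __m128i *c,
                                   __m128i *d)
  {
    /* Byte shuffles equivalent to rotate-left by 16 and by 8; these
       correspond to the .LC table in chacha20-ssse3-amd64.S. */
    const __m128i r16 = _mm_set_epi8 (13,12,15,14, 9,8,11,10,
                                      5,4,7,6, 1,0,3,2);
    const __m128i r8  = _mm_set_epi8 (14,13,12,15, 10,9,8,11,
                                      6,5,4,7, 2,1,0,3);

    *a = _mm_add_epi32 (*a, *b);
    *d = _mm_shuffle_epi8 (_mm_xor_si128 (*d, *a), r16);
    *c = _mm_add_epi32 (*c, *d);
    *b = rotl32_shifts (_mm_xor_si128 (*b, *c), 12);
    *a = _mm_add_epi32 (*a, *b);
    *d = _mm_shuffle_epi8 (_mm_xor_si128 (*d, *a), r8);
    *c = _mm_add_epi32 (*c, *d);
    *b = rotl32_shifts (_mm_xor_si128 (*b, *c), 7);
  }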

Before (Intel Haswell):

 CHACHA20       |  nanosecs/byte   mebibytes/sec   cycles/byte
     STREAM enc |      2.16 ns/B     441.4 MiB/s      6.91 c/B
     STREAM dec |      2.16 ns/B     441.4 MiB/s      6.91 c/B

After:

 CHACHA20       |  nanosecs/byte   mebibytes/sec   cycles/byte
     STREAM enc |     0.742 ns/B    1284.8 MiB/s      2.38 c/B
     STREAM dec |     0.741 ns/B    1286.5 MiB/s      2.37 c/B

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
 cipher/Makefile.am            |    2 
 cipher/chacha20-ssse3-amd64.S |  610 +++++++++++++++++++++++++++++++++++++++++
 cipher/chacha20.c             |   48 +++
 configure.ac                  |    7 
 4 files changed, 665 insertions(+), 2 deletions(-)
 create mode 100644 cipher/chacha20-ssse3-amd64.S

diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index bc7959a..27ca7ac 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -59,7 +59,7 @@ EXTRA_libcipher_la_SOURCES = \
 arcfour.c arcfour-amd64.S \
 blowfish.c blowfish-amd64.S blowfish-arm.S \
 cast5.c cast5-amd64.S cast5-arm.S \
-chacha20.c \
+chacha20.c chacha20-ssse3-amd64.S \
 crc.c \
 des.c des-amd64.S \
 dsa.c \
diff --git a/cipher/chacha20-ssse3-amd64.S b/cipher/chacha20-ssse3-amd64.S
new file mode 100644
index 0000000..aaa7e5b
--- /dev/null
+++ b/cipher/chacha20-ssse3-amd64.S
@@ -0,0 +1,610 @@
+/* chacha20-ssse3-amd64.S  -  AMD64/SSSE3 implementation of ChaCha20
+ *
+ * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Based on public domain implementation by Andrew Moon at
+ *  https://github.com/floodyberry/chacha-opt
+ */
+
+#ifdef __x86_64__
+#include <config.h>
+
+#if defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_SSSE3) && USE_CHACHA20
+
+#ifdef __PIC__
+#  define RIP (%rip)
+#else
+#  define RIP
+#endif
+
+.text
+
+.align 8
+.globl _gcry_chacha20_amd64_ssse3_blocks
+.type  _gcry_chacha20_amd64_ssse3_blocks,@function;
+_gcry_chacha20_amd64_ssse3_blocks:
+.Lchacha_blocks_ssse3_local:
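+	/* Arguments: %rdi: state (16 u32 words), %rsi: input (NULL for */
+	/* keystream only), %rdx: output, %rcx: length in bytes.        */
+	/* Returns the stack burn depth in %eax.                        */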
+	pushq %rbx
+	pushq %rbp
+	movq %rsp, %rbp
+	andq $~63, %rsp
+	subq $512, %rsp
+	leaq .LC RIP, %rax
+	movdqa 0(%rax), %xmm6
+	movdqa 16(%rax), %xmm7
+	movdqu 0(%rdi), %xmm8
+	movdqu 16(%rdi), %xmm9
+	movdqu 32(%rdi), %xmm10
+	movdqu 48(%rdi), %xmm11
+	movl $20, %eax
+	movq $1, %r9
+	movdqa %xmm8, 0(%rsp)
+	movdqa %xmm9, 16(%rsp)
+	movdqa %xmm10, 32(%rsp)
+	movdqa %xmm11, 48(%rsp)
+	movdqa %xmm6, 80(%rsp)
+	movdqa %xmm7, 96(%rsp)
+	movq %rax, 64(%rsp)
+	cmpq $256, %rcx
+	jb .Lchacha_blocks_ssse3_below256
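+	/* At least 256 bytes: take the 4-way parallel path.  Broadcast */
+	/* each invariant state word across an xmm register and spill   */
+	/* them to the stack; the counter words are generated per       */
+	/* iteration below.                                             */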
+	pshufd $0x00, %xmm8, %xmm0
+	pshufd $0x55, %xmm8, %xmm1
+	pshufd $0xaa, %xmm8, %xmm2
+	pshufd $0xff, %xmm8, %xmm3
+	movdqa %xmm0, 128(%rsp)
+	movdqa %xmm1, 144(%rsp)
+	movdqa %xmm2, 160(%rsp)
+	movdqa %xmm3, 176(%rsp)
+	pshufd $0x00, %xmm9, %xmm0
+	pshufd $0x55, %xmm9, %xmm1
+	pshufd $0xaa, %xmm9, %xmm2
+	pshufd $0xff, %xmm9, %xmm3
+	movdqa %xmm0, 192(%rsp)
+	movdqa %xmm1, 208(%rsp)
+	movdqa %xmm2, 224(%rsp)
+	movdqa %xmm3, 240(%rsp)
+	pshufd $0x00, %xmm10, %xmm0
+	pshufd $0x55, %xmm10, %xmm1
+	pshufd $0xaa, %xmm10, %xmm2
+	pshufd $0xff, %xmm10, %xmm3
+	movdqa %xmm0, 256(%rsp)
+	movdqa %xmm1, 272(%rsp)
+	movdqa %xmm2, 288(%rsp)
+	movdqa %xmm3, 304(%rsp)
+	pshufd $0xaa, %xmm11, %xmm0
+	pshufd $0xff, %xmm11, %xmm1
+	movdqa %xmm0, 352(%rsp)
+	movdqa %xmm1, 368(%rsp)
+	jmp .Lchacha_blocks_ssse3_atleast256
+.p2align 6,,63
+	# align to 4 mod 64
+	nop;nop;nop;nop;
+.Lchacha_blocks_ssse3_atleast256:
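+	/* Build the 64-bit block counters for blocks n..n+3 (low and   */
+	/* high halves at 320/336(%rsp)) and save the counter advanced  */
+	/* by four back into the state copy.                            */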
+	movq 48(%rsp), %rax
+	leaq 1(%rax), %r8
+	leaq 2(%rax), %r9
+	leaq 3(%rax), %r10
+	leaq 4(%rax), %rbx
+	movl %eax, 320(%rsp)
+	movl %r8d, 4+320(%rsp)
+	movl %r9d, 8+320(%rsp)
+	movl %r10d, 12+320(%rsp)
+	shrq $32, %rax
+	shrq $32, %r8
+	shrq $32, %r9
+	shrq $32, %r10
+	movl %eax, 336(%rsp)
+	movl %r8d, 4+336(%rsp)
+	movl %r9d, 8+336(%rsp)
+	movl %r10d, 12+336(%rsp)
+	movq %rbx, 48(%rsp)
+	movq 64(%rsp), %rax
+	movdqa 128(%rsp), %xmm0
+	movdqa 144(%rsp), %xmm1
+	movdqa 160(%rsp), %xmm2
+	movdqa 176(%rsp), %xmm3
+	movdqa 192(%rsp), %xmm4
+	movdqa 208(%rsp), %xmm5
+	movdqa 224(%rsp), %xmm6
+	movdqa 240(%rsp), %xmm7
+	movdqa 256(%rsp), %xmm8
+	movdqa 272(%rsp), %xmm9
+	movdqa 288(%rsp), %xmm10
+	movdqa 304(%rsp), %xmm11
+	movdqa 320(%rsp), %xmm12
+	movdqa 336(%rsp), %xmm13
+	movdqa 352(%rsp), %xmm14
+	movdqa 368(%rsp), %xmm15
+.Lchacha_blocks_ssse3_mainloop1:
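+	/* 4-way double-round loop: one column round plus one diagonal  */
+	/* round per iteration, hence the decrement by two.             */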
+	paddd %xmm4, %xmm0
+	paddd %xmm5, %xmm1
+	pxor %xmm0, %xmm12
+	pxor %xmm1, %xmm13
+	paddd %xmm6, %xmm2
+	paddd %xmm7, %xmm3
+	pxor %xmm2, %xmm14
+	pxor %xmm3, %xmm15
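+	/* 80(%rsp) and 96(%rsp) hold the pshufb masks for rotate-left  */
+	/* by 16 and by 8 (the .LC table); rotates by 12 and 7 use the  */
+	/* shift+shift+xor form with %xmm12/%xmm15 as scratch.          */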
+	pshufb 80(%rsp), %xmm12
+	pshufb 80(%rsp), %xmm13
+	paddd %xmm12, %xmm8
+	paddd %xmm13, %xmm9
+	pshufb 80(%rsp), %xmm14
+	pshufb 80(%rsp), %xmm15
+	paddd %xmm14, %xmm10
+	paddd %xmm15, %xmm11
+	movdqa %xmm12, 112(%rsp)
+	pxor %xmm8, %xmm4
+	pxor %xmm9, %xmm5
+	movdqa %xmm4, %xmm12
+	pslld $ 12, %xmm4
+	psrld $20, %xmm12
+	pxor %xmm12, %xmm4
+	movdqa %xmm5, %xmm12
+	pslld $ 12, %xmm5
+	psrld $20, %xmm12
+	pxor %xmm12, %xmm5
+	pxor %xmm10, %xmm6
+	pxor %xmm11, %xmm7
+	movdqa %xmm6, %xmm12
+	pslld $ 12, %xmm6
+	psrld $20, %xmm12
+	pxor %xmm12, %xmm6
+	movdqa %xmm7, %xmm12
+	pslld $ 12, %xmm7
+	psrld $20, %xmm12
+	pxor %xmm12, %xmm7
+	movdqa 112(%rsp), %xmm12
+	paddd %xmm4, %xmm0
+	paddd %xmm5, %xmm1
+	pxor %xmm0, %xmm12
+	pxor %xmm1, %xmm13
+	paddd %xmm6, %xmm2
+	paddd %xmm7, %xmm3
+	pxor %xmm2, %xmm14
+	pxor %xmm3, %xmm15
+	pshufb 96(%rsp), %xmm12
+	pshufb 96(%rsp), %xmm13
+	paddd %xmm12, %xmm8
+	paddd %xmm13, %xmm9
+	pshufb 96(%rsp), %xmm14
+	pshufb 96(%rsp), %xmm15
+	paddd %xmm14, %xmm10
+	paddd %xmm15, %xmm11
+	movdqa %xmm12, 112(%rsp)
+	pxor %xmm8, %xmm4
+	pxor %xmm9, %xmm5
+	movdqa %xmm4, %xmm12
+	pslld $ 7, %xmm4
+	psrld $25, %xmm12
+	pxor %xmm12, %xmm4
+	movdqa %xmm5, %xmm12
+	pslld $ 7, %xmm5
+	psrld $25, %xmm12
+	pxor %xmm12, %xmm5
+	pxor %xmm10, %xmm6
+	pxor %xmm11, %xmm7
+	movdqa %xmm6, %xmm12
+	pslld $ 7, %xmm6
+	psrld $25, %xmm12
+	pxor %xmm12, %xmm6
+	movdqa %xmm7, %xmm12
+	pslld $ 7, %xmm7
+	psrld $25, %xmm12
+	pxor %xmm12, %xmm7
+	movdqa 112(%rsp), %xmm12
+	paddd %xmm5, %xmm0
+	paddd %xmm6, %xmm1
+	pxor %xmm0, %xmm15
+	pxor %xmm1, %xmm12
+	paddd %xmm7, %xmm2
+	paddd %xmm4, %xmm3
+	pxor %xmm2, %xmm13
+	pxor %xmm3, %xmm14
+	pshufb 80(%rsp), %xmm15
+	pshufb 80(%rsp), %xmm12
+	paddd %xmm15, %xmm10
+	paddd %xmm12, %xmm11
+	pshufb 80(%rsp), %xmm13
+	pshufb 80(%rsp), %xmm14
+	paddd %xmm13, %xmm8
+	paddd %xmm14, %xmm9
+	movdqa %xmm15, 112(%rsp)
+	pxor %xmm10, %xmm5
+	pxor %xmm11, %xmm6
+	movdqa %xmm5, %xmm15
+	pslld $ 12, %xmm5
+	psrld $20, %xmm15
+	pxor %xmm15, %xmm5
+	movdqa %xmm6, %xmm15
+	pslld $ 12, %xmm6
+	psrld $20, %xmm15
+	pxor %xmm15, %xmm6
+	pxor %xmm8, %xmm7
+	pxor %xmm9, %xmm4
+	movdqa %xmm7, %xmm15
+	pslld $ 12, %xmm7
+	psrld $20, %xmm15
+	pxor %xmm15, %xmm7
+	movdqa %xmm4, %xmm15
+	pslld $ 12, %xmm4
+	psrld $20, %xmm15
+	pxor %xmm15, %xmm4
+	movdqa 112(%rsp), %xmm15
+	paddd %xmm5, %xmm0
+	paddd %xmm6, %xmm1
+	pxor %xmm0, %xmm15
+	pxor %xmm1, %xmm12
+	paddd %xmm7, %xmm2
+	paddd %xmm4, %xmm3
+	pxor %xmm2, %xmm13
+	pxor %xmm3, %xmm14
+	pshufb 96(%rsp), %xmm15
+	pshufb 96(%rsp), %xmm12
+	paddd %xmm15, %xmm10
+	paddd %xmm12, %xmm11
+	pshufb 96(%rsp), %xmm13
+	pshufb 96(%rsp), %xmm14
+	paddd %xmm13, %xmm8
+	paddd %xmm14, %xmm9
+	movdqa %xmm15, 112(%rsp)
+	pxor %xmm10, %xmm5
+	pxor %xmm11, %xmm6
+	movdqa %xmm5, %xmm15
+	pslld $ 7, %xmm5
+	psrld $25, %xmm15
+	pxor %xmm15, %xmm5
+	movdqa %xmm6, %xmm15
+	pslld $ 7, %xmm6
+	psrld $25, %xmm15
+	pxor %xmm15, %xmm6
+	pxor %xmm8, %xmm7
+	pxor %xmm9, %xmm4
+	movdqa %xmm7, %xmm15
+	pslld $ 7, %xmm7
+	psrld $25, %xmm15
+	pxor %xmm15, %xmm7
+	movdqa %xmm4, %xmm15
+	pslld $ 7, %xmm4
+	psrld $25, %xmm15
+	pxor %xmm15, %xmm4
+	subq $2, %rax
+	movdqa 112(%rsp), %xmm15
+	jnz .Lchacha_blocks_ssse3_mainloop1
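+	/* Rounds done: add back the saved input state to produce the   */
+	/* keystream for the four blocks.                               */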
+	paddd 128(%rsp), %xmm0
+	paddd 144(%rsp), %xmm1
+	paddd 160(%rsp), %xmm2
+	paddd 176(%rsp), %xmm3
+	paddd 192(%rsp), %xmm4
+	paddd 208(%rsp), %xmm5
+	paddd 224(%rsp), %xmm6
+	paddd 240(%rsp), %xmm7
+	paddd 256(%rsp), %xmm8
+	paddd 272(%rsp), %xmm9
+	paddd 288(%rsp), %xmm10
+	paddd 304(%rsp), %xmm11
+	paddd 320(%rsp), %xmm12
+	paddd 336(%rsp), %xmm13
+	paddd 352(%rsp), %xmm14
+	paddd 368(%rsp), %xmm15
+	movdqa %xmm8, 384(%rsp)
+	movdqa %xmm9, 400(%rsp)
+	movdqa %xmm10, 416(%rsp)
+	movdqa %xmm11, 432(%rsp)
+	movdqa %xmm12, 448(%rsp)
+	movdqa %xmm13, 464(%rsp)
+	movdqa %xmm14, 480(%rsp)
+	movdqa %xmm15, 496(%rsp)
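+	/* Transpose the lane-sliced words back into four contiguous    */
+	/* 64-byte blocks (the second half is transposed after being    */
+	/* reloaded from the stack).                                    */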
+	movdqa %xmm0, %xmm8
+	movdqa %xmm2, %xmm9
+	movdqa %xmm4, %xmm10
+	movdqa %xmm6, %xmm11
+	punpckhdq %xmm1, %xmm0
+	punpckhdq %xmm3, %xmm2
+	punpckhdq %xmm5, %xmm4
+	punpckhdq %xmm7, %xmm6
+	punpckldq %xmm1, %xmm8
+	punpckldq %xmm3, %xmm9
+	punpckldq %xmm5, %xmm10
+	punpckldq %xmm7, %xmm11
+	movdqa %xmm0, %xmm1
+	movdqa %xmm4, %xmm3
+	movdqa %xmm8, %xmm5
+	movdqa %xmm10, %xmm7
+	punpckhqdq %xmm2, %xmm0
+	punpckhqdq %xmm6, %xmm4
+	punpckhqdq %xmm9, %xmm8
+	punpckhqdq %xmm11, %xmm10
+	punpcklqdq %xmm2, %xmm1
+	punpcklqdq %xmm6, %xmm3
+	punpcklqdq %xmm9, %xmm5
+	punpcklqdq %xmm11, %xmm7
+	andq %rsi, %rsi
+	jz .Lchacha_blocks_ssse3_noinput1
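+	/* Input present: XOR the keystream with the source data.       */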
+	movdqu 0(%rsi), %xmm2
+	movdqu 16(%rsi), %xmm6
+	movdqu 64(%rsi), %xmm9
+	movdqu 80(%rsi), %xmm11
+	movdqu 128(%rsi), %xmm12
+	movdqu 144(%rsi), %xmm13
+	movdqu 192(%rsi), %xmm14
+	movdqu 208(%rsi), %xmm15
+	pxor %xmm2, %xmm5
+	pxor %xmm6, %xmm7
+	pxor %xmm9, %xmm8
+	pxor %xmm11, %xmm10
+	pxor %xmm12, %xmm1
+	pxor %xmm13, %xmm3
+	pxor %xmm14, %xmm0
+	pxor %xmm15, %xmm4
+	movdqu %xmm5, 0(%rdx)
+	movdqu %xmm7, 16(%rdx)
+	movdqu %xmm8, 64(%rdx)
+	movdqu %xmm10, 80(%rdx)
+	movdqu %xmm1, 128(%rdx)
+	movdqu %xmm3, 144(%rdx)
+	movdqu %xmm0, 192(%rdx)
+	movdqu %xmm4, 208(%rdx)
+	movdqa 384(%rsp), %xmm0
+	movdqa 400(%rsp), %xmm1
+	movdqa 416(%rsp), %xmm2
+	movdqa 432(%rsp), %xmm3
+	movdqa 448(%rsp), %xmm4
+	movdqa 464(%rsp), %xmm5
+	movdqa 480(%rsp), %xmm6
+	movdqa 496(%rsp), %xmm7
+	movdqa %xmm0, %xmm8
+	movdqa %xmm2, %xmm9
+	movdqa %xmm4, %xmm10
+	movdqa %xmm6, %xmm11
+	punpckldq %xmm1, %xmm8
+	punpckldq %xmm3, %xmm9
+	punpckhdq %xmm1, %xmm0
+	punpckhdq %xmm3, %xmm2
+	punpckldq %xmm5, %xmm10
+	punpckldq %xmm7, %xmm11
+	punpckhdq %xmm5, %xmm4
+	punpckhdq %xmm7, %xmm6
+	movdqa %xmm8, %xmm1
+	movdqa %xmm0, %xmm3
+	movdqa %xmm10, %xmm5
+	movdqa %xmm4, %xmm7
+	punpcklqdq %xmm9, %xmm1
+	punpcklqdq %xmm11, %xmm5
+	punpckhqdq %xmm9, %xmm8
+	punpckhqdq %xmm11, %xmm10
+	punpcklqdq %xmm2, %xmm3
+	punpcklqdq %xmm6, %xmm7
+	punpckhqdq %xmm2, %xmm0
+	punpckhqdq %xmm6, %xmm4
+	movdqu 32(%rsi), %xmm2
+	movdqu 48(%rsi), %xmm6
+	movdqu 96(%rsi), %xmm9
+	movdqu 112(%rsi), %xmm11
+	movdqu 160(%rsi), %xmm12
+	movdqu 176(%rsi), %xmm13
+	movdqu 224(%rsi), %xmm14
+	movdqu 240(%rsi), %xmm15
+	pxor %xmm2, %xmm1
+	pxor %xmm6, %xmm5
+	pxor %xmm9, %xmm8
+	pxor %xmm11, %xmm10
+	pxor %xmm12, %xmm3
+	pxor %xmm13, %xmm7
+	pxor %xmm14, %xmm0
+	pxor %xmm15, %xmm4
+	movdqu %xmm1, 32(%rdx)
+	movdqu %xmm5, 48(%rdx)
+	movdqu %xmm8, 96(%rdx)
+	movdqu %xmm10, 112(%rdx)
+	movdqu %xmm3, 160(%rdx)
+	movdqu %xmm7, 176(%rdx)
+	movdqu %xmm0, 224(%rdx)
+	movdqu %xmm4, 240(%rdx)
+	addq $256, %rsi
+	jmp .Lchacha_blocks_ssse3_mainloop_cont
+.Lchacha_blocks_ssse3_noinput1:
+	movdqu %xmm5, 0(%rdx)
+	movdqu %xmm7, 16(%rdx)
+	movdqu %xmm8, 64(%rdx)
+	movdqu %xmm10, 80(%rdx)
+	movdqu %xmm1, 128(%rdx)
+	movdqu %xmm3, 144(%rdx)
+	movdqu %xmm0, 192(%rdx)
+	movdqu %xmm4, 208(%rdx)
+	movdqa 384(%rsp), %xmm0
+	movdqa 400(%rsp), %xmm1
+	movdqa 416(%rsp), %xmm2
+	movdqa 432(%rsp), %xmm3
+	movdqa 448(%rsp), %xmm4
+	movdqa 464(%rsp), %xmm5
+	movdqa 480(%rsp), %xmm6
+	movdqa 496(%rsp), %xmm7
+	movdqa %xmm0, %xmm8
+	movdqa %xmm2, %xmm9
+	movdqa %xmm4, %xmm10
+	movdqa %xmm6, %xmm11
+	punpckldq %xmm1, %xmm8
+	punpckldq %xmm3, %xmm9
+	punpckhdq %xmm1, %xmm0
+	punpckhdq %xmm3, %xmm2
+	punpckldq %xmm5, %xmm10
+	punpckldq %xmm7, %xmm11
+	punpckhdq %xmm5, %xmm4
+	punpckhdq %xmm7, %xmm6
+	movdqa %xmm8, %xmm1
+	movdqa %xmm0, %xmm3
+	movdqa %xmm10, %xmm5
+	movdqa %xmm4, %xmm7
+	punpcklqdq %xmm9, %xmm1
+	punpcklqdq %xmm11, %xmm5
+	punpckhqdq %xmm9, %xmm8
+	punpckhqdq %xmm11, %xmm10
+	punpcklqdq %xmm2, %xmm3
+	punpcklqdq %xmm6, %xmm7
+	punpckhqdq %xmm2, %xmm0
+	punpckhqdq %xmm6, %xmm4
+	movdqu %xmm1, 32(%rdx)
+	movdqu %xmm5, 48(%rdx)
+	movdqu %xmm8, 96(%rdx)
+	movdqu %xmm10, 112(%rdx)
+	movdqu %xmm3, 160(%rdx)
+	movdqu %xmm7, 176(%rdx)
+	movdqu %xmm0, 224(%rdx)
+	movdqu %xmm4, 240(%rdx)
+.Lchacha_blocks_ssse3_mainloop_cont:
+	addq $256, %rdx
+	subq $256, %rcx
+	cmp $256, %rcx
+	jae .Lchacha_blocks_ssse3_atleast256
+	movdqa 80(%rsp), %xmm6
+	movdqa 96(%rsp), %xmm7
+	movdqa 0(%rsp), %xmm8
+	movdqa 16(%rsp), %xmm9
+	movdqa 32(%rsp), %xmm10
+	movdqa 48(%rsp), %xmm11
+	movq $1, %r9
+.Lchacha_blocks_ssse3_below256:
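+	/* Serial path: process remaining data one 64-byte block at a   */
+	/* time; %xmm5 holds the 64-bit counter increment.              */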
+	movq %r9, %xmm5
+	andq %rcx, %rcx
+	jz .Lchacha_blocks_ssse3_done
+	cmpq $64, %rcx
+	jae .Lchacha_blocks_ssse3_above63
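+	/* Partial final block: stage the data through the aligned      */
+	/* stack buffer and copy the result out below.                  */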
+	movq %rdx, %r9
+	andq %rsi, %rsi
+	jz .Lchacha_blocks_ssse3_noinput2
+	movq %rcx, %r10
+	movq %rsp, %rdx
+	addq %r10, %rsi
+	addq %r10, %rdx
+	negq %r10
+.Lchacha_blocks_ssse3_copyinput:
+	movb (%rsi, %r10), %al
+	movb %al, (%rdx, %r10)
+	incq %r10
+	jnz .Lchacha_blocks_ssse3_copyinput
+	movq %rsp, %rsi
+.Lchacha_blocks_ssse3_noinput2:
+	movq %rsp, %rdx
+.Lchacha_blocks_ssse3_above63:
+	movdqa %xmm8, %xmm0
+	movdqa %xmm9, %xmm1
+	movdqa %xmm10, %xmm2
+	movdqa %xmm11, %xmm3
+	movq 64(%rsp), %rax
+.Lchacha_blocks_ssse3_mainloop2:
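+	/* Single-block double-round loop; the pshufd shuffles realign  */
+	/* the rows between the column and diagonal halves.             */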
+	paddd %xmm1, %xmm0
+	pxor %xmm0, %xmm3
+	pshufb %xmm6, %xmm3
+	paddd %xmm3, %xmm2
+	pxor %xmm2, %xmm1
+	movdqa %xmm1, %xmm4
+	pslld $12, %xmm4
+	psrld $20, %xmm1
+	pxor %xmm4, %xmm1
+	paddd %xmm1, %xmm0
+	pxor %xmm0, %xmm3
+	pshufb %xmm7, %xmm3
+	pshufd $0x93, %xmm0, %xmm0
+	paddd %xmm3, %xmm2
+	pshufd $0x4e, %xmm3, %xmm3
+	pxor %xmm2, %xmm1
+	pshufd $0x39, %xmm2, %xmm2
+	movdqa %xmm1, %xmm4
+	pslld $7, %xmm4
+	psrld $25, %xmm1
+	pxor %xmm4, %xmm1
+	paddd %xmm1, %xmm0
+	pxor %xmm0, %xmm3
+	pshufb %xmm6, %xmm3
+	paddd %xmm3, %xmm2
+	pxor %xmm2, %xmm1
+	movdqa %xmm1, %xmm4
+	pslld $12, %xmm4
+	psrld $20, %xmm1
+	pxor %xmm4, %xmm1
+	paddd %xmm1, %xmm0
+	pxor %xmm0, %xmm3
+	pshufb %xmm7, %xmm3
+	pshufd $0x39, %xmm0, %xmm0
+	paddd %xmm3, %xmm2
+	pshufd $0x4e, %xmm3, %xmm3
+	pxor %xmm2, %xmm1
+	pshufd $0x93, %xmm2, %xmm2
+	movdqa %xmm1, %xmm4
+	pslld $7, %xmm4
+	psrld $25, %xmm1
+	pxor %xmm4, %xmm1
+	subq $2, %rax
+	jnz .Lchacha_blocks_ssse3_mainloop2
+	paddd %xmm8, %xmm0
+	paddd %xmm9, %xmm1
+	paddd %xmm10, %xmm2
+	paddd %xmm11, %xmm3
+	andq %rsi, %rsi
+	jz .Lchacha_blocks_ssse3_noinput3
+	movdqu 0(%rsi), %xmm12
+	movdqu 16(%rsi), %xmm13
+	movdqu 32(%rsi), %xmm14
+	movdqu 48(%rsi), %xmm15
+	pxor %xmm12, %xmm0
+	pxor %xmm13, %xmm1
+	pxor %xmm14, %xmm2
+	pxor %xmm15, %xmm3
+	addq $64, %rsi
+.Lchacha_blocks_ssse3_noinput3:
+	movdqu %xmm0, 0(%rdx)
+	movdqu %xmm1, 16(%rdx)
+	movdqu %xmm2, 32(%rdx)
+	movdqu %xmm3, 48(%rdx)
+	paddq %xmm5, %xmm11
+	cmpq $64, %rcx
+	jbe .Lchacha_blocks_ssse3_mainloop2_finishup
+	addq $64, %rdx
+	subq $64, %rcx
+	jmp .Lchacha_blocks_ssse3_below256
+.Lchacha_blocks_ssse3_mainloop2_finishup:
+	cmpq $64, %rcx
+	je .Lchacha_blocks_ssse3_done
+	addq %rcx, %r9
+	addq %rcx, %rdx
+	negq %rcx
+.Lchacha_blocks_ssse3_copyoutput:
+	movb (%rdx, %rcx), %al
+	movb %al, (%r9, %rcx)
+	incq %rcx
+	jnz .Lchacha_blocks_ssse3_copyoutput
+.Lchacha_blocks_ssse3_done:
+	movdqu %xmm11, 48(%rdi)
+	movq %rbp, %rsp
+	popq %rbp
+	popq %rbx
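+	/* Return the stack burn depth for the caller to wipe.          */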
+	movl $(63 + 512 + 16), %eax
+	ret
+.size _gcry_chacha20_amd64_ssse3_blocks,.-_gcry_chacha20_amd64_ssse3_blocks;
+
+.data
+.align 16;
+.LC:
+.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13       /* pshufb rotate by 16 */
+.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14       /* pshufb rotate by 8 */
+
+#endif /*defined(USE_CHACHA20)*/
+#endif /*__x86_64*/
diff --git a/cipher/chacha20.c b/cipher/chacha20.c
index f250789..1ffa458 100644
--- a/cipher/chacha20.c
+++ b/cipher/chacha20.c
@@ -40,6 +40,14 @@
 #include "bufhelp.h"
 
 
+/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
+#undef USE_SSSE3
+#if defined(__x86_64__) && defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_SSSE3)
+# define USE_SSSE3 1
+#endif
+
+
 #define CHACHA20_MIN_KEY_SIZE 16	/* Bytes.  */
 #define CHACHA20_MAX_KEY_SIZE 32	/* Bytes.  */
 #define CHACHA20_BLOCK_SIZE   64	/* Bytes.  */
@@ -59,10 +67,29 @@ typedef struct CHACHA20_context_s
   u32 input[CHACHA20_INPUT_LENGTH];
   u32 pad[CHACHA20_INPUT_LENGTH];
   unsigned int unused;		/* bytes in the pad.  */
+#ifdef USE_SSSE3
+  unsigned int use_ssse3:1;
+#endif
   chacha20_core_t core;
 } CHACHA20_context_t;
 
 
+#ifdef USE_SSSE3
+
+unsigned int _gcry_chacha20_amd64_ssse3_blocks(u32 *state,
+					       const byte *in,
+					       byte *out,
+					       size_t bytes);
+
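+/* Generate one 64-byte block of keystream; a NULL input selects the
+   keystream-only path in the assembly.  */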
+static unsigned int
+chacha20_core_ssse3 (u32 * dst, CHACHA20_context_t * ctx)
+{
+  return _gcry_chacha20_amd64_ssse3_blocks(ctx->input, NULL, (byte *)dst, 64);
+}
+
+#endif /* USE_SSSE3 */
+
+
 static void chacha20_setiv (void *context, const byte * iv, size_t ivlen);
 static const char *selftest (void);
 

@@ -262,6 +289,7 @@ chacha20_do_setkey (CHACHA20_context_t * ctx,
 {
   static int initialized;
   static const char *selftest_failed;
+  unsigned int features = _gcry_get_hw_features ();
 
   if (!initialized)
     {
@@ -278,6 +306,14 @@ chacha20_do_setkey (CHACHA20_context_t * ctx,
 
   ctx->core = chacha20_core;
 
+#ifdef USE_SSSE3
+  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+  if (ctx->use_ssse3)
+    ctx->core = chacha20_core_ssse3;
+#endif
+
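+  /* Silence the unused-variable warning when no accelerated
+     implementation is compiled in.  */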
+  (void)features;
+
   chacha20_keysetup (ctx, key, keylen);
 
   /* We default to a zero nonce.  */
@@ -348,7 +384,17 @@ chacha20_do_encrypt_stream (CHACHA20_context_t * ctx,
     {
       size_t nblocks = length / CHACHA20_BLOCK_SIZE;
       size_t bytes = nblocks * CHACHA20_BLOCK_SIZE;
-      burn = chacha20_blocks(ctx->input, inbuf, outbuf, bytes);
+
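+      /* Dispatch to the implementation selected at key setup.  */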
+      if (0)
+        { }
+#ifdef USE_SSSE3
+      else if (ctx->use_ssse3)
+        burn = _gcry_chacha20_amd64_ssse3_blocks(ctx->input, inbuf, outbuf,
+                                                 bytes);
+#endif
+      else
+        burn = chacha20_blocks(ctx->input, inbuf, outbuf, bytes);
+
       length -= bytes;
       outbuf += bytes;
       inbuf  += bytes;
diff --git a/configure.ac b/configure.ac
index 724c4ee..7d0dc88 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1810,6 +1810,13 @@ LIST_MEMBER(chacha20, $enabled_ciphers)
 if test "$found" = "1" ; then
    GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20.lo"
    AC_DEFINE(USE_CHACHA20, 1, [Defined if this module should be included])
+
+   case "${host}" in
+      x86_64-*-*)
+         # Build with the assembly implementation
+         GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ssse3-amd64.lo"
+      ;;
+   esac
 fi
 
 LIST_MEMBER(dsa, $enabled_pubkey_ciphers)