[PATCH 2/2] serpent: add AVX accelerated x86-64 implementation

Jussi Kivilinna jussi.kivilinna at iki.fi
Sun May 5 17:55:38 CEST 2013


* configure.ac (serpent) [ENABLE_AVX_SUPPORT]: Add
'serpent_avx_x86-64.lo'.
* cipher/Makefile.am (EXTRA_libcipher_la_SOURCES): Add
'serpent_avx_x86-64.S'.
* cipher/cipher.c (gcry_cipher_open) [USE_SERPENT]: Register bulk
functions for CBC-decryption and CTR-mode.
* cipher/serpent.c (USE_AVX): New macro.
[USE_AVX] (serpent_context_t): Add 'use_avx'.
[USE_AVX] (_gcry_serpent_avx_ctr_enc, _gcry_serpent_avx_cbc_dec): New
prototypes for assembler functions.
(serpent_setkey): Set 'serpent_init_done' before calling serpent_test.
(serpent_setkey) [USE_AVX]: Enable 'use_avx' if hardware supports AVX.
(_gcry_serpent_ctr_enc): New function.
(_gcry_serpent_cbc_dec): New function.
(selftest_ctr_128): New function.
(selftest_cbc_128): New function.
(selftest): Call selftest_ctr_128 and selftest_cbc_128.
* cipher/serpent_avx_x86-64.S: New file.
* src/cipher.h (_gcry_serpent_ctr_enc): New prototype.
(_gcry_serpent_cbc_dec): New prototype.
--

Patch adds a word-sliced AVX implementation of Serpent for x86-64 to speed up
parallelizable workloads (CTR mode, CBC-mode decryption). The implementation
processes eight blocks in parallel, with the two four-block sets interleaved
to expose independent instructions for out-of-order scheduling.
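
As a rough orientation, the dispatch pattern used by the new bulk functions in
serpent.c looks like the C sketch below: full 8-block chunks are handed to the
wide AVX routine and any remaining tail falls back to the generic single-block
path.  The helper names (avx_ctr_enc_8blk, encrypt_one_block, BLOCKSIZE) are
illustrative stand-ins, not the symbols added by the patch:

#define BLOCKSIZE 16   /* sizeof(serpent_block_t) */

/* Illustrative stand-ins; the patch itself uses _gcry_serpent_avx_ctr_enc
   and serpent_encrypt_internal.  */
extern void avx_ctr_enc_8blk (void *ctx, unsigned char *out,
                              const unsigned char *in, unsigned char *ctr);
extern void encrypt_one_block (void *ctx, const unsigned char *in,
                               unsigned char *out);

static void
ctr_enc_bulk (void *ctx, unsigned char *ctr, unsigned char *out,
              const unsigned char *in, unsigned int nblocks)
{
  /* Wide path: the AVX code consumes eight blocks per call and also
     advances CTR by eight.  */
  while (nblocks >= 8)
    {
      avx_ctr_enc_8blk (ctx, out, in, ctr);
      out += 8 * BLOCKSIZE;
      in  += 8 * BLOCKSIZE;
      nblocks -= 8;
    }

  /* Tail: generic single-block code, as in the C part of the patch.  */
  for (; nblocks; nblocks--)
    {
      unsigned char keystream[BLOCKSIZE];
      int i;

      encrypt_one_block (ctx, ctr, keystream);
      for (i = 0; i < BLOCKSIZE; i++)
        out[i] = in[i] ^ keystream[i];

      /* Increment the big-endian 128-bit counter.  */
      for (i = BLOCKSIZE - 1; i >= 0; i--)
        if (++ctr[i])
          break;

      out += BLOCKSIZE;
      in  += BLOCKSIZE;
    }
}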

Speed old vs. new on Intel Core i5-2450M (Sandy-Bridge):

                ECB/Stream         CBC             CFB             OFB             CTR
             --------------- --------------- --------------- --------------- ---------------
SERPENT128    1.00x   1.00x   1.00x   4.44x   1.00x   0.99x   1.00x   1.00x   4.68x   4.58x
SERPENT256    1.00x   0.99x   1.00x   4.42x   1.00x   1.01x   1.00x   1.00x   4.68x   4.68x
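
The paired figures per mode above are the encryption and decryption speed-up
factors; only CBC decryption and CTR improve, since those are the directions
where consecutive blocks are independent.  The sketch below (with a
hypothetical decrypt_8_blocks standing in for the patch's __serpent_dec_blk8)
shows why eight CBC blocks can be decrypted at once: all D(C_i) calls are
independent, and the chaining XOR with the preceding ciphertext block is
applied afterwards:

#include <string.h>

#define BLOCKSIZE 16

/* Hypothetical 8-wide decryptor standing in for __serpent_dec_blk8; it
   decrypts in[0..7] into out[0..7] independently of each other.  */
extern void decrypt_8_blocks (void *ctx, unsigned char out[8][BLOCKSIZE],
                              const unsigned char in[8][BLOCKSIZE]);

/* One 8-block chunk of CBC decryption (in and out assumed not to overlap).
   CBC encryption cannot be split up like this, because C_i must be known
   before the encryption of block i+1 can start.  */
static void
cbc_dec_8blk (void *ctx, unsigned char iv[BLOCKSIZE],
              unsigned char out[8][BLOCKSIZE],
              const unsigned char in[8][BLOCKSIZE])
{
  unsigned char next_iv[BLOCKSIZE];
  int i, j;

  memcpy (next_iv, in[7], BLOCKSIZE);   /* IV for the following chunk */

  decrypt_8_blocks (ctx, out, in);      /* the parallel part */

  for (i = 7; i > 0; i--)               /* P_i = D(C_i) ^ C_(i-1) */
    for (j = 0; j < BLOCKSIZE; j++)
      out[i][j] ^= in[i - 1][j];
  for (j = 0; j < BLOCKSIZE; j++)       /* P_0 = D(C_0) ^ IV */
    out[0][j] ^= iv[j];

  memcpy (iv, next_iv, BLOCKSIZE);
}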

Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
 cipher/Makefile.am          |    2 
 cipher/cipher.c             |    8 
 cipher/serpent.c            |  209 ++++++++++++
 cipher/serpent_avx_x86-64.S |  766 +++++++++++++++++++++++++++++++++++++++++++
 configure.ac                |    5 
 src/cipher.h                |    7 
 6 files changed, 994 insertions(+), 3 deletions(-)
 create mode 100644 cipher/serpent_avx_x86-64.S

diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index 0b61a27..f9291a8 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -68,7 +68,7 @@ rmd160.c \
 rsa.c \
 scrypt.c \
 seed.c \
-serpent.c \
+serpent.c serpent_avx_x86-64.S \
 sha1.c \
 sha256.c \
 sha512.c \
diff --git a/cipher/cipher.c b/cipher/cipher.c
index f1224af..20ac2c7 100644
--- a/cipher/cipher.c
+++ b/cipher/cipher.c
@@ -726,6 +726,14 @@ gcry_cipher_open (gcry_cipher_hd_t *handle,
               h->bulk.ctr_enc = _gcry_camellia_ctr_enc;
               break;
 #endif /*USE_CAMELLIA*/
+#ifdef USE_SERPENT
+	    case GCRY_CIPHER_SERPENT128:
+	    case GCRY_CIPHER_SERPENT192:
+	    case GCRY_CIPHER_SERPENT256:
+              h->bulk.cbc_dec = _gcry_serpent_cbc_dec;
+              h->bulk.ctr_enc = _gcry_serpent_ctr_enc;
+              break;
+#endif /*USE_SERPENT*/
 
             default:
               break;
diff --git a/cipher/serpent.c b/cipher/serpent.c
index 72840cf..b38c586 100644
--- a/cipher/serpent.c
+++ b/cipher/serpent.c
@@ -28,6 +28,17 @@
 #include "g10lib.h"
 #include "cipher.h"
 #include "bithelp.h"
+#include "bufhelp.h"
+#include "selftest_help.h"
+
+
+/* USE_AVX indicates whether to compile with Intel AVX code. */
+#undef USE_AVX
+#if defined(ENABLE_AVX_SUPPORT)
+# if defined(__x86_64__)
+#  define USE_AVX 1
+# endif
+#endif
 
 /* Number of rounds per Serpent encrypt/decrypt operation.  */
 #define ROUNDS 32
@@ -49,9 +60,28 @@ typedef u32 serpent_subkeys_t[ROUNDS + 1][4];
 typedef struct serpent_context
 {
   serpent_subkeys_t keys;	/* Generated subkeys.  */
+
+#ifdef USE_AVX
+  int use_avx;			/* AVX implementation shall be used. */
+#endif /*USE_AVX*/
 } serpent_context_t;
 
 
+#ifdef USE_AVX
+/* Assembler implementations of Serpent using AVX.  Process eight blocks in
+   parallel.
+ */
+extern void _gcry_serpent_avx_ctr_enc(serpent_context_t *ctx,
+				      unsigned char *out,
+				      const unsigned char *in,
+				      unsigned char *ctr);
+
+extern void _gcry_serpent_avx_cbc_dec(serpent_context_t *ctx,
+				      unsigned char *out,
+				      const unsigned char *in,
+				      unsigned char *iv);
+#endif
+
 /* A prototype.  */
 static const char *serpent_test (void);
 
@@ -191,7 +221,7 @@ static const char *serpent_test (void);
     r4 &= r0; r1 ^= r3; \
     r4 ^= r2; r1 |= r0; \
     r1 ^= r2; r0 ^= r3; \
-    r2  = r1; r1 |= r3; \
+    r2 =  r1; r1 |= r3; \
     r1 ^= r0; \
     \
     w = r1; x = r2; y = r3; z = r4; \
@@ -587,10 +617,10 @@ serpent_setkey (void *ctx,
   if (! serpent_init_done)
     {
       /* Execute a self-test the first time, Serpent is used.  */
+      serpent_init_done = 1;
       serpent_test_ret = serpent_test ();
       if (serpent_test_ret)
 	log_error ("Serpent test failure: %s\n", serpent_test_ret);
-      serpent_init_done = 1;
     }
 
   if (serpent_test_ret)
@@ -601,6 +631,14 @@ serpent_setkey (void *ctx,
       _gcry_burn_stack (sizeof (serpent_key_t));
     }
 
+#ifdef USE_AVX
+  context->use_avx = 0;
+  if ((_gcry_get_hw_features () & HWF_INTEL_AVX))
+    {
+      context->use_avx = 1;
+    }
+#endif
+
   return ret;
 }
 
@@ -740,6 +778,166 @@ serpent_decrypt (void *ctx, byte *buffer_out, const byte *buffer_in)
 
 

 
+/* Bulk encryption of complete blocks in CTR mode.  This function is only
+   intended for the bulk encryption feature of cipher.c.  CTR is expected to be
+   of size sizeof(serpent_block_t). */
+void
+_gcry_serpent_ctr_enc(void *context, unsigned char *ctr,
+                      void *outbuf_arg, const void *inbuf_arg,
+                      unsigned int nblocks)
+{
+  serpent_context_t *ctx = context;
+  unsigned char *outbuf = outbuf_arg;
+  const unsigned char *inbuf = inbuf_arg;
+  unsigned char tmpbuf[sizeof(serpent_block_t)];
+  int burn_stack_depth = 2 * sizeof (serpent_block_t);
+  int i;
+
+#ifdef USE_AVX
+  if (ctx->use_avx)
+    {
+      int did_use_avx = 0;
+
+      /* Process data in 8 block chunks. */
+      while (nblocks >= 8)
+        {
+          _gcry_serpent_avx_ctr_enc(ctx, outbuf, inbuf, ctr);
+
+          nblocks -= 8;
+          outbuf += 8 * sizeof(serpent_block_t);
+          inbuf  += 8 * sizeof(serpent_block_t);
+          did_use_avx = 1;
+        }
+
+      if (did_use_avx)
+        {
+          /* clear AVX registers */
+          asm volatile ("vzeroall;\n":::);
+
+          /* serpent-avx assembly code does not use stack */
+          if (nblocks == 0)
+            burn_stack_depth = 0;
+        }
+
+      /* Use generic code to handle smaller chunks... */
+      /* TODO: use caching instead? */
+    }
+#endif
+
+  for ( ;nblocks; nblocks-- )
+    {
+      /* Encrypt the counter. */
+      serpent_encrypt_internal(ctx, ctr, tmpbuf);
+      /* XOR the input with the encrypted counter and store in output.  */
+      buf_xor(outbuf, tmpbuf, inbuf, sizeof(serpent_block_t));
+      outbuf += sizeof(serpent_block_t);
+      inbuf  += sizeof(serpent_block_t);
+      /* Increment the counter.  */
+      for (i = sizeof(serpent_block_t); i > 0; i--)
+        {
+          ctr[i-1]++;
+          if (ctr[i-1])
+            break;
+        }
+    }
+
+  wipememory(tmpbuf, sizeof(tmpbuf));
+  _gcry_burn_stack(burn_stack_depth);
+}
+
+/* Bulk decryption of complete blocks in CBC mode.  This function is only
+   intended for the bulk encryption feature of cipher.c. */
+void
+_gcry_serpent_cbc_dec(void *context, unsigned char *iv,
+                       void *outbuf_arg, const void *inbuf_arg,
+                       unsigned int nblocks)
+{
+  serpent_context_t *ctx = context;
+  unsigned char *outbuf = outbuf_arg;
+  const unsigned char *inbuf = inbuf_arg;
+  unsigned char savebuf[sizeof(serpent_block_t)];
+  int burn_stack_depth = 2 * sizeof (serpent_block_t);
+
+#ifdef USE_AVX
+  if (ctx->use_avx)
+    {
+      int did_use_avx = 0;
+
+      /* Process data in 8 block chunks. */
+      while (nblocks >= 8)
+        {
+          _gcry_serpent_avx_cbc_dec(ctx, outbuf, inbuf, iv);
+
+          nblocks -= 8;
+          outbuf += 8 * sizeof(serpent_block_t);
+          inbuf  += 8 * sizeof(serpent_block_t);
+          did_use_avx = 1;
+        }
+
+      if (did_use_avx)
+        {
+          /* clear AVX registers */
+          asm volatile ("vzeroall;\n":::);
+
+          /* serpent-avx assembly code does not use stack */
+          if (nblocks == 0)
+            burn_stack_depth = 0;
+        }
+
+      /* Use generic code to handle smaller chunks... */
+    }
+#endif
+
+  for ( ;nblocks; nblocks-- )
+    {
+      /* We need to save INBUF away because it may be identical to
+         OUTBUF.  */
+      memcpy(savebuf, inbuf, sizeof(serpent_block_t));
+
+      serpent_decrypt_internal (ctx, inbuf, outbuf);
+
+      buf_xor(outbuf, outbuf, iv, sizeof(serpent_block_t));
+      memcpy(iv, savebuf, sizeof(serpent_block_t));
+      inbuf += sizeof(serpent_block_t);
+      outbuf += sizeof(serpent_block_t);
+    }
+
+  wipememory(savebuf, sizeof(savebuf));
+  _gcry_burn_stack(burn_stack_depth);
+}
+
+

+
+/* Run the self-tests for SERPENT-CTR-128, tests IV increment of bulk CTR
+   encryption.  Returns NULL on success. */
+static const char*
+selftest_ctr_128 (void)
+{
+  const int nblocks = 8+1;
+  const int blocksize = sizeof(serpent_block_t);
+  const int context_size = sizeof(serpent_context_t);
+
+  return _gcry_selftest_helper_ctr_128("SERPENT", &serpent_setkey,
+           &serpent_encrypt, &_gcry_serpent_ctr_enc, nblocks, blocksize,
+	   context_size);
+}
+
+
+/* Run the self-tests for SERPENT-CBC-128, tests bulk CBC decryption.
+   Returns NULL on success. */
+static const char*
+selftest_cbc_128 (void)
+{
+  const int nblocks = 8+2;
+  const int blocksize = sizeof(serpent_block_t);
+  const int context_size = sizeof(serpent_context_t);
+
+  return _gcry_selftest_helper_cbc_128("SERPENT", &serpent_setkey,
+           &serpent_encrypt, &_gcry_serpent_cbc_dec, nblocks, blocksize,
+	   context_size);
+}
+
+
 /* Serpent test.  */
 
 static const char *
@@ -748,6 +946,7 @@ serpent_test (void)
   serpent_context_t context;
   unsigned char scratch[16];
   unsigned int i;
+  const char *r;
 
   static struct test
   {
@@ -819,6 +1018,12 @@ serpent_test (void)
 	}
     }
 
+  if ( (r = selftest_ctr_128 ()) )
+    return r;
+
+  if ( (r = selftest_cbc_128 ()) )
+    return r;
+
   return NULL;
 }
 
diff --git a/cipher/serpent_avx_x86-64.S b/cipher/serpent_avx_x86-64.S
new file mode 100644
index 0000000..0544f67
--- /dev/null
+++ b/cipher/serpent_avx_x86-64.S
@@ -0,0 +1,766 @@
+/* serpent_avx_x86-64.S  -  AVX implementation of Serpent cipher
+ *
+ * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(ENABLE_AVX_SUPPORT) && defined(USE_SERPENT)
+
+#ifdef __PIC__
+#  define RIP (%rip)
+#else
+#  define RIP
+#endif
+
+/* struct serpent_context: */
+#define ctx_keys 0
+
+/* register macros */
+#define CTX %rdi
+
+/* vector registers */
+.set RA0, %xmm0
+.set RA1, %xmm1
+.set RA2, %xmm2
+.set RA3, %xmm3
+.set RA4, %xmm4
+
+.set RB0, %xmm5
+.set RB1, %xmm6
+.set RB2, %xmm7
+.set RB3, %xmm8
+.set RB4, %xmm9
+
+.set RNOT, %xmm10
+.set RTMP0, %xmm11
+.set RTMP1, %xmm12
+.set RTMP2, %xmm13
+
+/**********************************************************************
+  helper macros
+ **********************************************************************/
+
+/* preprocessor macro for renaming vector registers using GAS macros */
+#define sbox_reg_rename(r0, r1, r2, r3, r4, \
+			new_r0, new_r1, new_r2, new_r3, new_r4) \
+	.set rename_reg0, new_r0; \
+	.set rename_reg1, new_r1; \
+	.set rename_reg2, new_r2; \
+	.set rename_reg3, new_r3; \
+	.set rename_reg4, new_r4; \
+	\
+	.set r0, rename_reg0; \
+	.set r1, rename_reg1; \
+	.set r2, rename_reg2; \
+	.set r3, rename_reg3; \
+	.set r4, rename_reg4;
+
+/* vector 32-bit rotation to left */
+#define vec_rol(reg, nleft, tmp) \
+	vpslld $(nleft), reg, tmp;	\
+	vpsrld $(32 - (nleft)), reg, reg;	\
+	vpor reg, tmp, reg;
+
+/* vector 32-bit rotation to right */
+#define vec_ror(reg, nright, tmp) \
+	vec_rol(reg, 32 - nright, tmp)
+
+/* 4x4 32-bit integer matrix transpose */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+	vpunpckhdq x1, x0, t2; \
+	vpunpckldq x1, x0, x0; \
+	\
+	vpunpckldq x3, x2, t1; \
+	vpunpckhdq x3, x2, x2; \
+	\
+	vpunpckhqdq t1, x0, x1; \
+	vpunpcklqdq t1, x0, x0; \
+	\
+	vpunpckhqdq x2, t2, x3; \
+	vpunpcklqdq x2, t2, x2;
+
+/**********************************************************************
+  8-way serpent
+ **********************************************************************/
+
+/*
+ * These are the S-Boxes of Serpent from following research paper.
+ *
+ *  D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference,
+ *   (New York, New York, USA), p. 317–329, National Institute of Standards and
+ *   Technology, 2000.
+ *
+ * Paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf
+ *
+ */
+#define SBOX0(r0, r1, r2, r3, r4) \
+	vpxor	r3, r0, r3;	vmovdqa	r1, r4;		\
+	vpand	r1, r3, r1;	vpxor	r4, r2, r4;	\
+	vpxor	r1, r0, r1;	vpor	r0, r3, r0;	\
+	vpxor	r0, r4, r0;	vpxor	r4, r3, r4;	\
+	vpxor	r3, r2, r3;	vpor	r2, r1, r2;	\
+	vpxor	r2, r4, r2;	vpxor	r4, RNOT, r4;	\
+	vpor	r4, r1, r4;	vpxor	r1, r3, r1;	\
+	vpxor	r1, r4, r1;	vpor	r3, r0, r3;	\
+	vpxor	r1, r3, r1;	vpxor	r4, r3, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r4,r2,r0,r3);
+
+#define SBOX0_INVERSE(r0, r1, r2, r3, r4) \
+	vpxor	r2, RNOT, r2;	vmovdqa	r1, r4;		\
+	vpor	r1, r0, r1;	vpxor	r4, RNOT, r4;	\
+	vpxor	r1, r2, r1;	vpor	r2, r4, r2;	\
+	vpxor	r1, r3, r1;	vpxor	r0, r4, r0;	\
+	vpxor	r2, r0, r2;	vpand	r0, r3, r0;	\
+	vpxor	r4, r0, r4;	vpor	r0, r1, r0;	\
+	vpxor	r0, r2, r0;	vpxor	r3, r4, r3;	\
+	vpxor	r2, r1, r2;	vpxor	r3, r0, r3;	\
+	vpxor	r3, r1, r3;	\
+	vpand	r2, r3, r2;	\
+	vpxor	r4, r2, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r0,r4,r1,r3,r2);
+
+#define SBOX1(r0, r1, r2, r3, r4) \
+	vpxor	r0, RNOT, r0;	vpxor	r2, RNOT, r2;	\
+	vmovdqa	r0, r4;		vpand	r0, r1, r0;	\
+	vpxor	r2, r0, r2;	vpor	r0, r3, r0;	\
+	vpxor	r3, r2, r3;	vpxor	r1, r0, r1;	\
+	vpxor	r0, r4, r0;	vpor	r4, r1, r4;	\
+	vpxor	r1, r3, r1;	vpor	r2, r0, r2;	\
+	vpand	r2, r4, r2;	vpxor	r0, r1, r0;	\
+	vpand	r1, r2, r1;	\
+	vpxor	r1, r0, r1;	vpand	r0, r2, r0;	\
+	vpxor	r0, r4, r0;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r2,r0,r3,r1,r4);
+
+#define SBOX1_INVERSE(r0, r1, r2, r3, r4) \
+	vmovdqa	r1, r4;		vpxor	r1, r3, r1;	\
+	vpand	r3, r1, r3;	vpxor	r4, r2, r4;	\
+	vpxor	r3, r0, r3;	vpor	r0, r1, r0;	\
+	vpxor	r2, r3, r2;	vpxor	r0, r4, r0;	\
+	vpor	r0, r2, r0;	vpxor	r1, r3, r1;	\
+	vpxor	r0, r1, r0;	vpor	r1, r3, r1;	\
+	vpxor	r1, r0, r1;	vpxor	r4, RNOT, r4;	\
+	vpxor	r4, r1, r4;	vpor	r1, r0, r1;	\
+	vpxor	r1, r0, r1;	\
+	vpor	r1, r4, r1;	\
+	vpxor	r3, r1, r3;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r4,r0,r3,r2,r1);
+
+#define SBOX2(r0, r1, r2, r3, r4) \
+	vmovdqa	r0, r4;		vpand	r0, r2, r0;	\
+	vpxor	r0, r3, r0;	vpxor	r2, r1, r2;	\
+	vpxor	r2, r0, r2;	vpor	r3, r4, r3;	\
+	vpxor	r3, r1, r3;	vpxor	r4, r2, r4;	\
+	vmovdqa	r3, r1;		vpor	r3, r4, r3;	\
+	vpxor	r3, r0, r3;	vpand	r0, r1, r0;	\
+	vpxor	r4, r0, r4;	vpxor	r1, r3, r1;	\
+	vpxor	r1, r4, r1;	vpxor	r4, RNOT, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r2,r3,r1,r4,r0);
+
+#define SBOX2_INVERSE(r0, r1, r2, r3, r4) \
+	vpxor	r2, r3, r2;	vpxor	r3, r0, r3;	\
+	vmovdqa	r3, r4;		vpand	r3, r2, r3;	\
+	vpxor	r3, r1, r3;	vpor	r1, r2, r1;	\
+	vpxor	r1, r4, r1;	vpand	r4, r3, r4;	\
+	vpxor	r2, r3, r2;	vpand	r4, r0, r4;	\
+	vpxor	r4, r2, r4;	vpand	r2, r1, r2;	\
+	vpor	r2, r0, r2;	vpxor	r3, RNOT, r3;	\
+	vpxor	r2, r3, r2;	vpxor	r0, r3, r0;	\
+	vpand	r0, r1, r0;	vpxor	r3, r4, r3;	\
+	vpxor	r3, r0, r3;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r4,r2,r3,r0);
+
+#define SBOX3(r0, r1, r2, r3, r4) \
+	vmovdqa	r0, r4;		vpor	r0, r3, r0;	\
+	vpxor	r3, r1, r3;	vpand	r1, r4, r1;	\
+	vpxor	r4, r2, r4;	vpxor	r2, r3, r2;	\
+	vpand	r3, r0, r3;	vpor	r4, r1, r4;	\
+	vpxor	r3, r4, r3;	vpxor	r0, r1, r0;	\
+	vpand	r4, r0, r4;	vpxor	r1, r3, r1;	\
+	vpxor	r4, r2, r4;	vpor	r1, r0, r1;	\
+	vpxor	r1, r2, r1;	vpxor	r0, r3, r0;	\
+	vmovdqa	r1, r2;		vpor	r1, r3, r1;	\
+	vpxor	r1, r0, r1;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r2,r3,r4,r0);
+
+#define SBOX3_INVERSE(r0, r1, r2, r3, r4) \
+	vmovdqa	r2, r4;		vpxor	r2, r1, r2;	\
+	vpxor	r0, r2, r0;	vpand	r4, r2, r4;	\
+	vpxor	r4, r0, r4;	vpand	r0, r1, r0;	\
+	vpxor	r1, r3, r1;	vpor	r3, r4, r3;	\
+	vpxor	r2, r3, r2;	vpxor	r0, r3, r0;	\
+	vpxor	r1, r4, r1;	vpand	r3, r2, r3;	\
+	vpxor	r3, r1, r3;	vpxor	r1, r0, r1;	\
+	vpor	r1, r2, r1;	vpxor	r0, r3, r0;	\
+	vpxor	r1, r4, r1;	\
+	vpxor	r0, r1, r0;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r2,r1,r3,r0,r4);
+
+#define SBOX4(r0, r1, r2, r3, r4) \
+	vpxor	r1, r3, r1;	vpxor	r3, RNOT, r3;	\
+	vpxor	r2, r3, r2;	vpxor	r3, r0, r3;	\
+	vmovdqa	r1, r4;		vpand	r1, r3, r1;	\
+	vpxor	r1, r2, r1;	vpxor	r4, r3, r4;	\
+	vpxor	r0, r4, r0;	vpand	r2, r4, r2;	\
+	vpxor	r2, r0, r2;	vpand	r0, r1, r0;	\
+	vpxor	r3, r0, r3;	vpor	r4, r1, r4;	\
+	vpxor	r4, r0, r4;	vpor	r0, r3, r0;	\
+	vpxor	r0, r2, r0;	vpand	r2, r3, r2;	\
+	vpxor	r0, RNOT, r0;	vpxor	r4, r2, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r4,r0,r3,r2);
+
+#define SBOX4_INVERSE(r0, r1, r2, r3, r4) \
+	vmovdqa	r2, r4;		vpand	r2, r3, r2;	\
+	vpxor	r2, r1, r2;	vpor	r1, r3, r1;	\
+	vpand	r1, r0, r1;	vpxor	r4, r2, r4;	\
+	vpxor	r4, r1, r4;	vpand	r1, r2, r1;	\
+	vpxor	r0, RNOT, r0;	vpxor	r3, r4, r3;	\
+	vpxor	r1, r3, r1;	vpand	r3, r0, r3;	\
+	vpxor	r3, r2, r3;	vpxor	r0, r1, r0;	\
+	vpand	r2, r0, r2;	vpxor	r3, r0, r3;	\
+	vpxor	r2, r4, r2;	\
+	vpor	r2, r3, r2;	vpxor	r3, r0, r3;	\
+	vpxor	r2, r1, r2;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r0,r3,r2,r4,r1);
+
+#define SBOX5(r0, r1, r2, r3, r4) \
+	vpxor	r0, r1, r0;	vpxor	r1, r3, r1;	\
+	vpxor	r3, RNOT, r3;	vmovdqa	r1, r4;		\
+	vpand	r1, r0, r1;	vpxor	r2, r3, r2;	\
+	vpxor	r1, r2, r1;	vpor	r2, r4, r2;	\
+	vpxor	r4, r3, r4;	vpand	r3, r1, r3;	\
+	vpxor	r3, r0, r3;	vpxor	r4, r1, r4;	\
+	vpxor	r4, r2, r4;	vpxor	r2, r0, r2;	\
+	vpand	r0, r3, r0;	vpxor	r2, RNOT, r2;	\
+	vpxor	r0, r4, r0;	vpor	r4, r3, r4;	\
+	vpxor	r2, r4, r2;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r3,r0,r2,r4);
+
+#define SBOX5_INVERSE(r0, r1, r2, r3, r4) \
+	vpxor	r1, RNOT, r1;	vmovdqa	r3, r4;		\
+	vpxor	r2, r1, r2;	vpor	r3, r0, r3;	\
+	vpxor	r3, r2, r3;	vpor	r2, r1, r2;	\
+	vpand	r2, r0, r2;	vpxor	r4, r3, r4;	\
+	vpxor	r2, r4, r2;	vpor	r4, r0, r4;	\
+	vpxor	r4, r1, r4;	vpand	r1, r2, r1;	\
+	vpxor	r1, r3, r1;	vpxor	r4, r2, r4;	\
+	vpand	r3, r4, r3;	vpxor	r4, r1, r4;	\
+	vpxor	r3, r4, r3;	vpxor	r4, RNOT, r4;	\
+	vpxor	r3, r0, r3;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r4,r3,r2,r0);
+
+#define SBOX6(r0, r1, r2, r3, r4) \
+	vpxor	r2, RNOT, r2;	vmovdqa	r3, r4;		\
+	vpand	r3, r0, r3;	vpxor	r0, r4, r0;	\
+	vpxor	r3, r2, r3;	vpor	r2, r4, r2;	\
+	vpxor	r1, r3, r1;	vpxor	r2, r0, r2;	\
+	vpor	r0, r1, r0;	vpxor	r2, r1, r2;	\
+	vpxor	r4, r0, r4;	vpor	r0, r3, r0;	\
+	vpxor	r0, r2, r0;	vpxor	r4, r3, r4;	\
+	vpxor	r4, r0, r4;	vpxor	r3, RNOT, r3;	\
+	vpand	r2, r4, r2;	\
+	vpxor	r2, r3, r2;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r0,r1,r4,r2,r3);
+
+#define SBOX6_INVERSE(r0, r1, r2, r3, r4) \
+	vpxor	r0, r2, r0;	vmovdqa	r2, r4;		\
+	vpand	r2, r0, r2;	vpxor	r4, r3, r4;	\
+	vpxor	r2, RNOT, r2;	vpxor	r3, r1, r3;	\
+	vpxor	r2, r3, r2;	vpor	r4, r0, r4;	\
+	vpxor	r0, r2, r0;	vpxor	r3, r4, r3;	\
+	vpxor	r4, r1, r4;	vpand	r1, r3, r1;	\
+	vpxor	r1, r0, r1;	vpxor	r0, r3, r0;	\
+	vpor	r0, r2, r0;	vpxor	r3, r1, r3;	\
+	vpxor	r4, r0, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r1,r2,r4,r3,r0);
+
+#define SBOX7(r0, r1, r2, r3, r4) \
+	vmovdqa	r1, r4;		vpor	r1, r2, r1;	\
+	vpxor	r1, r3, r1;	vpxor	r4, r2, r4;	\
+	vpxor	r2, r1, r2;	vpor	r3, r4, r3;	\
+	vpand	r3, r0, r3;	vpxor	r4, r2, r4;	\
+	vpxor	r3, r1, r3;	vpor	r1, r4, r1;	\
+	vpxor	r1, r0, r1;	vpor	r0, r4, r0;	\
+	vpxor	r0, r2, r0;	vpxor	r1, r4, r1;	\
+	vpxor	r2, r1, r2;	vpand	r1, r0, r1;	\
+	vpxor	r1, r4, r1;	vpxor	r2, RNOT, r2;	\
+	vpor	r2, r0, r2;	\
+	vpxor	r4, r2, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r4,r3,r1,r0,r2);
+
+#define SBOX7_INVERSE(r0, r1, r2, r3, r4) \
+	vmovdqa	r2, r4;		vpxor	r2, r0, r2;	\
+	vpand	r0, r3, r0;	vpor	r4, r3, r4;	\
+	vpxor	r2, RNOT, r2;	vpxor	r3, r1, r3;	\
+	vpor	r1, r0, r1;	vpxor	r0, r2, r0;	\
+	vpand	r2, r4, r2;	vpand	r3, r4, r3;	\
+	vpxor	r1, r2, r1;	vpxor	r2, r0, r2;	\
+	vpor	r0, r2, r0;	vpxor	r4, r1, r4;	\
+	vpxor	r0, r3, r0;	vpxor	r3, r4, r3;	\
+	vpor	r4, r0, r4;	vpxor	r3, r2, r3;	\
+	vpxor	r4, r2, r4;	\
+	\
+	sbox_reg_rename(r0,r1,r2,r3,r4, r3,r0,r1,r4,r2);
+
+/* Apply SBOX number WHICH to the block.  */
+#define SBOX(which, r0, r1, r2, r3, r4) \
+	SBOX##which (r0, r1, r2, r3, r4)
+
+/* Apply inverse SBOX number WHICH to the block.  */
+#define SBOX_INVERSE(which, r0, r1, r2, r3, r4) \
+	SBOX##which##_INVERSE (r0, r1, r2, r3, r4)
+
+/* XOR round key into block state in r0,r1,r2,r3. r4 used as temporary.  */
+#define BLOCK_XOR_KEY(r0, r1, r2, r3, r4, round) \
+	vbroadcastss (ctx_keys + (round) * 16 + 0 * 4)(CTX), r4; \
+	vpxor r0, r4, r0; \
+	vbroadcastss (ctx_keys + (round) * 16 + 1 * 4)(CTX), r4; \
+	vpxor r1, r4, r1; \
+	vbroadcastss (ctx_keys + (round) * 16 + 2 * 4)(CTX), r4; \
+	vpxor r2, r4, r2; \
+	vbroadcastss (ctx_keys + (round) * 16 + 3 * 4)(CTX), r4; \
+	vpxor r3, r4, r3;
+
+/* Apply the linear transformation to BLOCK.  */
+#define LINEAR_TRANSFORMATION(r0, r1, r2, r3, r4) \
+	vec_rol(r0, 13, r4);	\
+	vec_rol(r2, 3, r4);	\
+	vpxor r1, r0, r1;	\
+	vpxor r2, r1, r1;	\
+	vpslld $3, r0, r4;	\
+	vpxor r3, r2, r3;	\
+	vpxor r4, r3, r3;	\
+	vec_rol(r1, 1, r4);	\
+	vec_rol(r3, 7, r4);	\
+	vpxor r0, r1, r0;	\
+	vpxor r3, r0, r0;	\
+	vpslld $7, r1, r4;	\
+	vpxor r2, r3, r2;	\
+	vpxor r4, r2, r2;	\
+	vec_rol(r0, 5, r4);	\
+	vec_rol(r2, 22, r4);
+
+/* Apply the inverse linear transformation to BLOCK.  */
+#define LINEAR_TRANSFORMATION_INVERSE(r0, r1, r2, r3, r4) \
+	vec_ror(r2, 22, r4);	\
+	vec_ror(r0, 5, r4);	\
+	vpslld $7, r1, r4;	\
+	vpxor r2, r3, r2;	\
+	vpxor r4, r2, r2;	\
+	vpxor r0, r1, r0;	\
+	vpxor r3, r0, r0;	\
+	vec_ror(r3, 7, r4);	\
+	vec_ror(r1, 1, r4);	\
+	vpslld $3, r0, r4;	\
+	vpxor r3, r2, r3;	\
+	vpxor r4, r3, r3;	\
+	vpxor r1, r0, r1;	\
+	vpxor r2, r1, r1;	\
+	vec_ror(r2, 3, r4);	\
+	vec_ror(r0, 13, r4);
+
+/* Apply a Serpent round to eight parallel blocks.  This macro increments
+   `round'.  */
+#define ROUND(which, a0, a1, a2, a3, a4, b0, b1, b2, b3, b4) \
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);	\
+	SBOX (which, a0, a1, a2, a3, a4);		\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);	\
+		SBOX (which, b0, b1, b2, b3, b4);		\
+	LINEAR_TRANSFORMATION (a0, a1, a2, a3, a4);	\
+		LINEAR_TRANSFORMATION (b0, b1, b2, b3, b4);	\
+	.set round, (round + 1);
+
+/* Apply the last Serpent round to eight parallel blocks.  This macro increments
+   `round'.  */
+#define ROUND_LAST(which, a0, a1, a2, a3, a4, b0, b1, b2, b3, b4) \
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);	\
+	SBOX (which, a0, a1, a2, a3, a4);		\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);	\
+		SBOX (which, b0, b1, b2, b3, b4);		\
+	.set round, (round + 1);			\
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);	\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);	\
+	.set round, (round + 1);
+
+/* Apply an inverse Serpent round to eight parallel blocks.  This macro
+   decrements `round'.  */
+#define ROUND_INVERSE(which, a0, a1, a2, a3, a4, b0, b1, b2, b3, b4) \
+	LINEAR_TRANSFORMATION_INVERSE (a0, a1, a2, a3, a4);	\
+		LINEAR_TRANSFORMATION_INVERSE (b0, b1, b2, b3, b4);	\
+	SBOX_INVERSE (which, a0, a1, a2, a3, a4);		\
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);		\
+		SBOX_INVERSE (which, b0, b1, b2, b3, b4);		\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);		\
+	.set round, (round - 1);
+
+/* Apply the first inverse Serpent round to eight parallel blocks.  This macro
+   decrements `round'.  */
+#define ROUND_FIRST_INVERSE(which, a0, a1, a2, a3, a4, b0, b1, b2, b3, b4) \
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);	\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);	\
+	.set round, (round - 1);			\
+	SBOX_INVERSE (which, a0, a1, a2, a3, a4); 	\
+	BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round);	\
+		SBOX_INVERSE (which, b0, b1, b2, b3, b4); 	\
+		BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round);	\
+	.set round, (round - 1);
+
+.data
+.align 16
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+.text
+
+.align 8
+.type   __serpent_enc_blk8,@function;
+__serpent_enc_blk8:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext
+	 *						blocks
+	 * output:
+	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+	 * 						ciphertext blocks
+	 */
+
+	/* record input vector names for __serpent_enc_blk8 */
+	.set enc_in_a0, RA0
+	.set enc_in_a1, RA1
+	.set enc_in_a2, RA2
+	.set enc_in_a3, RA3
+	.set enc_in_b0, RB0
+	.set enc_in_b1, RB1
+	.set enc_in_b2, RB2
+	.set enc_in_b3, RB3
+
+	vpcmpeqd RNOT, RNOT, RNOT;
+
+	transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0);
+	transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP1);
+
+	.set round, 0
+	ROUND (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+
+	ROUND_LAST (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+
+	transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0);
+	transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP1);
+
+	/* record output vector names for __serpent_enc_blk8 */
+	.set enc_out_a0, RA0
+	.set enc_out_a1, RA1
+	.set enc_out_a2, RA2
+	.set enc_out_a3, RA3
+	.set enc_out_b0, RB0
+	.set enc_out_b1, RB1
+	.set enc_out_b2, RB2
+	.set enc_out_b3, RB3
+
+	ret;
+.size __serpent_enc_blk8,.-__serpent_enc_blk8;
+
+.align 8
+.type   __serpent_dec_blk8,@function;
+__serpent_dec_blk8:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+	 * 						ciphertext blocks
+	 * output:
+	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext
+	 *						blocks
+	 */
+
+	/* record input vector names for __serpent_dec_blk8 */
+	.set dec_in_a0, RA0
+	.set dec_in_a1, RA1
+	.set dec_in_a2, RA2
+	.set dec_in_a3, RA3
+	.set dec_in_b0, RB0
+	.set dec_in_b1, RB1
+	.set dec_in_b2, RB2
+	.set dec_in_b3, RB3
+
+	vpcmpeqd RNOT, RNOT, RNOT;
+
+	transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0);
+	transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP1);
+
+	.set round, 32
+	ROUND_FIRST_INVERSE (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+
+	ROUND_INVERSE (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (7, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (6, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (5, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (4, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (3, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (2, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (1, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+	ROUND_INVERSE (0, RA0, RA1, RA2, RA3, RA4, RB0, RB1, RB2, RB3, RB4);
+
+	transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0);
+	transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP1);
+
+	/* record output vector names for __serpent_dec_blk8 */
+	.set dec_out_a0, RA0
+	.set dec_out_a1, RA1
+	.set dec_out_a2, RA2
+	.set dec_out_a3, RA3
+	.set dec_out_b0, RB0
+	.set dec_out_b1, RB1
+	.set dec_out_b2, RB2
+	.set dec_out_b3, RB3
+
+	ret;
+.size __serpent_dec_blk8,.-__serpent_dec_blk8;
+
+#define inc_le128(x, minus_one, tmp) \
+	vpcmpeqq minus_one, x, tmp; \
+	vpsubq minus_one, x, x; \
+	vpslldq $8, tmp, tmp; \
+	vpsubq tmp, x, x;
+
+.align 8
+.global _gcry_serpent_avx_ctr_enc
+.type   _gcry_serpent_avx_ctr_enc,@function;
+_gcry_serpent_avx_ctr_enc:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst (8 blocks)
+	 *	%rdx: src (8 blocks)
+	 *	%rcx: iv (big endian, 128bit)
+	 */
+
+	.set RA0, enc_in_a0
+	.set RA1, enc_in_a1
+	.set RA2, enc_in_a2
+	.set RA3, enc_in_a3
+	.set RB0, enc_in_b0
+	.set RB1, enc_in_b1
+	.set RB2, enc_in_b2
+	.set RB3, enc_in_b3
+
+	vzeroupper;
+
+	vmovdqa .Lbswap128_mask RIP, RTMP1;
+
+	/* load IV and byteswap */
+	vmovdqu (%rcx), RA0;
+	vpshufb RTMP1, RA0, RTMP0; /* be => le */
+
+	vpcmpeqd RNOT, RNOT, RNOT;
+	vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
+
+	/* construct IVs */
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RA1;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RA2;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RA3;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RB0;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RB1;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RB2;
+	inc_le128(RTMP0, RNOT, RB3);
+	vpshufb RTMP1, RTMP0, RB3;
+	inc_le128(RTMP0, RNOT, RTMP2);
+	vpshufb RTMP1, RTMP0, RTMP0;
+	vmovdqu RTMP0, (%rcx); /* store new IV */
+
+	call __serpent_enc_blk8;
+
+	.set RA0, enc_out_a0
+	.set RA1, enc_out_a1
+	.set RA2, enc_out_a2
+	.set RA3, enc_out_a3
+	.set RB0, enc_out_b0
+	.set RB1, enc_out_b1
+	.set RB2, enc_out_b2
+	.set RB3, enc_out_b3
+
+	vmovdqu (0 * 16)(%rdx), RNOT;
+	vpxor RA0, RNOT, RA0;
+	vmovdqu (1 * 16)(%rdx), RNOT;
+	vpxor RA1, RNOT, RA1;
+	vmovdqu (2 * 16)(%rdx), RNOT;
+	vpxor RA2, RNOT, RA2;
+	vmovdqu (3 * 16)(%rdx), RNOT;
+	vpxor RA3, RNOT, RA3;
+	vmovdqu (4 * 16)(%rdx), RNOT;
+	vpxor RB0, RNOT, RB0;
+	vmovdqu (5 * 16)(%rdx), RNOT;
+	vpxor RB1, RNOT, RB1;
+	vmovdqu (6 * 16)(%rdx), RNOT;
+	vpxor RB2, RNOT, RB2;
+	vmovdqu (7 * 16)(%rdx), RNOT;
+	vpxor RB3, RNOT, RB3;
+
+	vmovdqu RA0, (0 * 16)(%rsi);
+	vmovdqu RA1, (1 * 16)(%rsi);
+	vmovdqu RA2, (2 * 16)(%rsi);
+	vmovdqu RA3, (3 * 16)(%rsi);
+	vmovdqu RB0, (4 * 16)(%rsi);
+	vmovdqu RB1, (5 * 16)(%rsi);
+	vmovdqu RB2, (6 * 16)(%rsi);
+	vmovdqu RB3, (7 * 16)(%rsi);
+
+	ret
+.size _gcry_serpent_avx_ctr_enc,.-_gcry_serpent_avx_ctr_enc;
+
+.align 8
+.global _gcry_serpent_avx_cbc_dec
+.type   _gcry_serpent_avx_cbc_dec,@function;
+_gcry_serpent_avx_cbc_dec:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst (8 blocks)
+	 *	%rdx: src (8 blocks)
+	 *	%rcx: iv
+	 */
+
+	.set RA0, dec_in_a0
+	.set RA1, dec_in_a1
+	.set RA2, dec_in_a2
+	.set RA3, dec_in_a3
+	.set RB0, dec_in_b0
+	.set RB1, dec_in_b1
+	.set RB2, dec_in_b2
+	.set RB3, dec_in_b3
+
+	vzeroupper;
+
+	vmovdqu (0 * 16)(%rdx), RA0;
+	vmovdqu (1 * 16)(%rdx), RA1;
+	vmovdqu (2 * 16)(%rdx), RA2;
+	vmovdqu (3 * 16)(%rdx), RA3;
+	vmovdqu (4 * 16)(%rdx), RB0;
+	vmovdqu (5 * 16)(%rdx), RB1;
+	vmovdqu (6 * 16)(%rdx), RB2;
+	vmovdqu (7 * 16)(%rdx), RB3;
+
+	call __serpent_dec_blk8;
+
+	.set RA0, dec_out_a0
+	.set RA1, dec_out_a1
+	.set RA2, dec_out_a2
+	.set RA3, dec_out_a3
+	.set RB0, dec_out_b0
+	.set RB1, dec_out_b1
+	.set RB2, dec_out_b2
+	.set RB3, dec_out_b3
+
+	vmovdqu (7 * 16)(%rdx), RNOT;
+	vpxor (%rcx), RA0, RA0;
+	vpxor (0 * 16)(%rdx), RA1, RA1;
+	vpxor (1 * 16)(%rdx), RA2, RA2;
+	vpxor (2 * 16)(%rdx), RA3, RA3;
+	vpxor (3 * 16)(%rdx), RB0, RB0;
+	vpxor (4 * 16)(%rdx), RB1, RB1;
+	vpxor (5 * 16)(%rdx), RB2, RB2;
+	vpxor (6 * 16)(%rdx), RB3, RB3;
+	vmovdqu RNOT, (%rcx); /* store new IV */
+
+	vmovdqu RA0, (0 * 16)(%rsi);
+	vmovdqu RA1, (1 * 16)(%rsi);
+	vmovdqu RA2, (2 * 16)(%rsi);
+	vmovdqu RA3, (3 * 16)(%rsi);
+	vmovdqu RB0, (4 * 16)(%rsi);
+	vmovdqu RB1, (5 * 16)(%rsi);
+	vmovdqu RB2, (6 * 16)(%rsi);
+	vmovdqu RB3, (7 * 16)(%rsi);
+
+	ret
+.size _gcry_serpent_avx_cbc_dec,.-_gcry_serpent_avx_cbc_dec;
+
+#endif /*defined(ENABLE_AVX_SUPPORT) && defined(USE_SERPENT)*/
+#endif /*__x86_64*/
diff --git a/configure.ac b/configure.ac
index 079951d..5ad2ebb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1219,6 +1219,11 @@ LIST_MEMBER(serpent, $enabled_ciphers)
 if test "$found" = "1" ; then
    GCRYPT_CIPHERS="$GCRYPT_CIPHERS serpent.lo"
    AC_DEFINE(USE_SERPENT, 1, [Defined if this module should be included])
+
+   if test x"$avxsupport" = xyes ; then
+      # Build with the AVX implementation
+      GCRYPT_CIPHERS="$GCRYPT_CIPHERS serpent_avx_x86-64.lo"
+   fi
 fi
 
 LIST_MEMBER(rfc2268, $enabled_ciphers)
diff --git a/src/cipher.h b/src/cipher.h
index 17fec6c..4e68487 100644
--- a/src/cipher.h
+++ b/src/cipher.h
@@ -103,6 +103,13 @@ void _gcry_camellia_cbc_dec (void *context, unsigned char *iv,
                              void *outbuf_arg, const void *inbuf_arg,
                              unsigned int nblocks);
 
+/*-- serpent.c --*/
+void _gcry_serpent_ctr_enc (void *context, unsigned char *ctr,
+                            void *outbuf_arg, const void *inbuf_arg,
+                            unsigned int nblocks);
+void _gcry_serpent_cbc_dec (void *context, unsigned char *iv,
+                            void *outbuf_arg, const void *inbuf_arg,
+                            unsigned int nblocks);
 
 /*-- dsa.c --*/
 void _gcry_register_pk_dsa_progress (gcry_handler_progress_t cbc, void *cb_data);



