[PATCH 1/2] Fix OCB amd64 assembly implementations for x32

Jussi Kivilinna jussi.kivilinna at iki.fi
Sat Oct 24 15:29:57 CEST 2015


* cipher/camellia-glue.c (_gcry_camellia_aesni_avx_ocb_enc)
(_gcry_camellia_aesni_avx_ocb_dec, _gcry_camellia_aesni_avx_ocb_auth)
(_gcry_camellia_aesni_avx2_ocb_enc, _gcry_camellia_aesni_avx2_ocb_dec)
(_gcry_camellia_aesni_avx2_ocb_auth, _gcry_camellia_ocb_crypt)
(_gcry_camellia_ocb_auth): Change 'Ls' from pointer array to u64 array.
* cipher/serpent.c (_gcry_serpent_sse2_ocb_enc)
(_gcry_serpent_sse2_ocb_dec, _gcry_serpent_sse2_ocb_auth)
(_gcry_serpent_avx2_ocb_enc, _gcry_serpent_avx2_ocb_dec)
(_gcry_serpent_ocb_crypt, _gcry_serpent_ocb_auth): Ditto.
* cipher/twofish.c (_gcry_twofish_amd64_ocb_enc)
(_gcry_twofish_amd64_ocb_dec, _gcry_twofish_amd64_ocb_auth)
(twofish_amd64_ocb_enc, twofish_amd64_ocb_dec, twofish_amd64_ocb_auth)
(_gcry_twofish_ocb_crypt, _gcry_twofish_ocb_auth): Ditto.
--

Pointers on x32 are 32-bit, but the amd64 assembly implementations
expect 64-bit pointers. Store the 'Ls' entries as 64-bit integers
instead of pointers so that the input arrays have the correct layout
for the assembly functions.
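
For illustration (not part of the patch), a minimal standalone sketch
of the layout mismatch being fixed; the names L0, L1, Ls_ptr and
Ls_u64 are hypothetical, but the casts mirror the ones used below:

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t u64;

  int main(void)
  {
    unsigned char L0[16] = { 0 }, L1[16] = { 0 };

    /* On x32, sizeof(void *) == 4, so each entry here is only 4 bytes
     * wide; amd64 assembly loading 8-byte entries would read two
     * packed 32-bit pointers as one bogus 64-bit value. */
    const void *Ls_ptr[2] = { L0, L1 };

    /* Widening each pointer through uintptr_t into a u64 slot gives
     * the 8-byte-per-entry layout the assembly expects; on x32 the
     * upper 32 bits are simply zero. */
    u64 Ls_u64[2] = { (uintptr_t)(void *)L0, (uintptr_t)(void *)L1 };

    printf ("pointer entry: %u bytes, u64 entry: %u bytes\n",
            (unsigned)sizeof (Ls_ptr[0]), (unsigned)sizeof (Ls_u64[0]));
    return 0;
  }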

Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
 cipher/camellia-glue.c |  116 ++++++++++++++++++++++++++----------------------
 cipher/serpent.c       |  104 +++++++++++++++++++++++--------------------
 cipher/twofish.c       |   32 +++++++------
 3 files changed, 136 insertions(+), 116 deletions(-)

diff --git a/cipher/camellia-glue.c b/cipher/camellia-glue.c
index dee0169..dfddb4a 100644
--- a/cipher/camellia-glue.c
+++ b/cipher/camellia-glue.c
@@ -141,20 +141,20 @@ extern void _gcry_camellia_aesni_avx_ocb_enc(CAMELLIA_context *ctx,
 					     const unsigned char *in,
 					     unsigned char *offset,
 					     unsigned char *checksum,
-					     const void *Ls[16]) ASM_FUNC_ABI;
+					     const u64 Ls[16]) ASM_FUNC_ABI;
 
 extern void _gcry_camellia_aesni_avx_ocb_dec(CAMELLIA_context *ctx,
 					     unsigned char *out,
 					     const unsigned char *in,
 					     unsigned char *offset,
 					     unsigned char *checksum,
-					     const void *Ls[16]) ASM_FUNC_ABI;
+					     const u64 Ls[16]) ASM_FUNC_ABI;
 
 extern void _gcry_camellia_aesni_avx_ocb_auth(CAMELLIA_context *ctx,
 					     const unsigned char *abuf,
 					     unsigned char *offset,
 					     unsigned char *checksum,
-					     const void *Ls[16]) ASM_FUNC_ABI;
+					     const u64 Ls[16]) ASM_FUNC_ABI;
 
 extern void _gcry_camellia_aesni_avx_keygen(CAMELLIA_context *ctx,
 					    const unsigned char *key,
@@ -185,20 +185,20 @@ extern void _gcry_camellia_aesni_avx2_ocb_enc(CAMELLIA_context *ctx,
 					      const unsigned char *in,
 					      unsigned char *offset,
 					      unsigned char *checksum,
-					      const void *Ls[32]) ASM_FUNC_ABI;
+					      const u64 Ls[32]) ASM_FUNC_ABI;
 
 extern void _gcry_camellia_aesni_avx2_ocb_dec(CAMELLIA_context *ctx,
 					      unsigned char *out,
 					      const unsigned char *in,
 					      unsigned char *offset,
 					      unsigned char *checksum,
-					      const void *Ls[32]) ASM_FUNC_ABI;
+					      const u64 Ls[32]) ASM_FUNC_ABI;
 
 extern void _gcry_camellia_aesni_avx2_ocb_auth(CAMELLIA_context *ctx,
 					       const unsigned char *abuf,
 					       unsigned char *offset,
 					       unsigned char *checksum,
-					       const void *Ls[32]) ASM_FUNC_ABI;
+					       const u64 Ls[32]) ASM_FUNC_ABI;
 #endif
 
 static const char *selftest(void);
@@ -630,27 +630,29 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
   if (ctx->use_aesni_avx2)
     {
       int did_use_aesni_avx2 = 0;
-      const void *Ls[32];
+      u64 Ls[32];
       unsigned int n = 32 - (blkn % 32);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 32)
 	{
 	  for (i = 0; i < 32; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 32] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 32] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 32] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 32] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 32] = c->u_mode.ocb.L[3];
-	  Ls[(15 + n) % 32] = c->u_mode.ocb.L[4];
-	  Ls[(23 + n) % 32] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
+	  Ls[(15 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[4];
+	  Ls[(23 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(31 + n) % 32];
 
 	  /* Process data in 32 block chunks. */
@@ -658,7 +660,7 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 32;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 32);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 32);
 
 	      if (encrypt)
 		_gcry_camellia_aesni_avx2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
@@ -691,25 +693,27 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
   if (ctx->use_aesni_avx)
     {
       int did_use_aesni_avx = 0;
-      const void *Ls[16];
+      u64 Ls[16];
       unsigned int n = 16 - (blkn % 16);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 16)
 	{
 	  for (i = 0; i < 16; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 16] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(15 + n) % 16];
 
 	  /* Process data in 16 block chunks. */
@@ -717,7 +721,7 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 16;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 16);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16);
 
 	      if (encrypt)
 		_gcry_camellia_aesni_avx_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
@@ -780,27 +784,29 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
   if (ctx->use_aesni_avx2)
     {
       int did_use_aesni_avx2 = 0;
-      const void *Ls[32];
+      u64 Ls[32];
       unsigned int n = 32 - (blkn % 32);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 32)
 	{
 	  for (i = 0; i < 32; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 32] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 32] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 32] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 32] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 32] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 32] = c->u_mode.ocb.L[3];
-	  Ls[(15 + n) % 32] = c->u_mode.ocb.L[4];
-	  Ls[(23 + n) % 32] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
+	  Ls[(15 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[4];
+	  Ls[(23 + n) % 32] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(31 + n) % 32];
 
 	  /* Process data in 32 block chunks. */
@@ -808,7 +814,7 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 32;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 32);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 32);
 
 	      _gcry_camellia_aesni_avx2_ocb_auth(ctx, abuf,
 						 c->u_mode.ocb.aad_offset,
@@ -837,25 +843,27 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
   if (ctx->use_aesni_avx)
     {
       int did_use_aesni_avx = 0;
-      const void *Ls[16];
+      u64 Ls[16];
       unsigned int n = 16 - (blkn % 16);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 16)
 	{
 	  for (i = 0; i < 16; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 16] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(15 + n) % 16];
 
 	  /* Process data in 16 block chunks. */
@@ -863,7 +871,7 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 16;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 16);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16);
 
 	      _gcry_camellia_aesni_avx_ocb_auth(ctx, abuf,
 						c->u_mode.ocb.aad_offset,
diff --git a/cipher/serpent.c b/cipher/serpent.c
index fc3afa6..4ef7f52 100644
--- a/cipher/serpent.c
+++ b/cipher/serpent.c
@@ -125,20 +125,20 @@ extern void _gcry_serpent_sse2_ocb_enc(serpent_context_t *ctx,
 				       const unsigned char *in,
 				       unsigned char *offset,
 				       unsigned char *checksum,
-				       const void *Ls[8]) ASM_FUNC_ABI;
+				       const u64 Ls[8]) ASM_FUNC_ABI;
 
 extern void _gcry_serpent_sse2_ocb_dec(serpent_context_t *ctx,
 				       unsigned char *out,
 				       const unsigned char *in,
 				       unsigned char *offset,
 				       unsigned char *checksum,
-				       const void *Ls[8]) ASM_FUNC_ABI;
+				       const u64 Ls[8]) ASM_FUNC_ABI;
 
 extern void _gcry_serpent_sse2_ocb_auth(serpent_context_t *ctx,
 					const unsigned char *abuf,
 					unsigned char *offset,
 					unsigned char *checksum,
-					const void *Ls[8]) ASM_FUNC_ABI;
+					const u64 Ls[8]) ASM_FUNC_ABI;
 #endif
 
 #ifdef USE_AVX2
@@ -165,20 +165,20 @@ extern void _gcry_serpent_avx2_ocb_enc(serpent_context_t *ctx,
 				       const unsigned char *in,
 				       unsigned char *offset,
 				       unsigned char *checksum,
-				       const void *Ls[16]) ASM_FUNC_ABI;
+				       const u64 Ls[16]) ASM_FUNC_ABI;
 
 extern void _gcry_serpent_avx2_ocb_dec(serpent_context_t *ctx,
 				       unsigned char *out,
 				       const unsigned char *in,
 				       unsigned char *offset,
 				       unsigned char *checksum,
-				       const void *Ls[16]) ASM_FUNC_ABI;
+				       const u64 Ls[16]) ASM_FUNC_ABI;
 
 extern void _gcry_serpent_avx2_ocb_auth(serpent_context_t *ctx,
 					const unsigned char *abuf,
 					unsigned char *offset,
 					unsigned char *checksum,
-					const void *Ls[16]) ASM_FUNC_ABI;
+					const u64 Ls[16]) ASM_FUNC_ABI;
 #endif
 
 #ifdef USE_NEON
@@ -1249,25 +1249,27 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
   if (ctx->use_avx2)
     {
       int did_use_avx2 = 0;
-      const void *Ls[16];
+      u64 Ls[16];
       unsigned int n = 16 - (blkn % 16);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 16)
 	{
 	  for (i = 0; i < 16; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 16] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(15 + n) % 16];
 
 	  /* Process data in 16 block chunks. */
@@ -1275,7 +1277,7 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 16;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 16);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16);
 
 	      if (encrypt)
 		_gcry_serpent_avx2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
@@ -1305,19 +1307,21 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
 #ifdef USE_SSE2
   {
     int did_use_sse2 = 0;
-    const void *Ls[8];
+    u64 Ls[8];
     unsigned int n = 8 - (blkn % 8);
-    const void **l;
+    u64 *l;
 
     if (nblocks >= 8)
       {
-	Ls[(0 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(1 + n) % 8] = c->u_mode.ocb.L[1];
-	Ls[(2 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(3 + n) % 8] = c->u_mode.ocb.L[2];
-	Ls[(4 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(5 + n) % 8] = c->u_mode.ocb.L[1];
-	Ls[(6 + n) % 8] = c->u_mode.ocb.L[0];
+	/* Use u64 to store pointers for x32 support (assembly function
+	 * assumes 64-bit pointers). */
+	Ls[(0 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(1 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	Ls[(2 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(3 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	Ls[(4 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(5 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	Ls[(6 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	l = &Ls[(7 + n) % 8];
 
 	/* Process data in 8 block chunks. */
@@ -1325,7 +1329,7 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
 	  {
 	    /* l_tmp will be used only every 65536-th block. */
 	    blkn += 8;
-	    *l = ocb_get_l(c, l_tmp, blkn - blkn % 8);
+	    *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 8);
 
 	    if (encrypt)
 	      _gcry_serpent_sse2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
@@ -1435,25 +1439,27 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
   if (ctx->use_avx2)
     {
       int did_use_avx2 = 0;
-      const void *Ls[16];
+      u64 Ls[16];
       unsigned int n = 16 - (blkn % 16);
-      const void **l;
+      u64 *l;
       int i;
 
       if (nblocks >= 16)
 	{
 	  for (i = 0; i < 16; i += 8)
 	    {
-	      Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2];
-	      Ls[(i + 4 + n) % 16] = c->u_mode.ocb.L[0];
-	      Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1];
-	      Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0];
+	      /* Use u64 to store pointers for x32 support (assembly function
+	       * assumes 64-bit pointers). */
+	      Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	      Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	      Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	      Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	    }
 
-	  Ls[(7 + n) % 16] = c->u_mode.ocb.L[3];
+	  Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3];
 	  l = &Ls[(15 + n) % 16];
 
 	  /* Process data in 16 block chunks. */
@@ -1461,7 +1467,7 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
 	    {
 	      /* l_tmp will be used only every 65536-th block. */
 	      blkn += 16;
-	      *l = ocb_get_l(c, l_tmp, blkn - blkn % 16);
+	      *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16);
 
 	      _gcry_serpent_avx2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
 					  c->u_mode.ocb.aad_sum, Ls);
@@ -1486,19 +1492,21 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
 #ifdef USE_SSE2
   {
     int did_use_sse2 = 0;
-    const void *Ls[8];
+    u64 Ls[8];
     unsigned int n = 8 - (blkn % 8);
-    const void **l;
+    u64 *l;
 
     if (nblocks >= 8)
       {
-	Ls[(0 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(1 + n) % 8] = c->u_mode.ocb.L[1];
-	Ls[(2 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(3 + n) % 8] = c->u_mode.ocb.L[2];
-	Ls[(4 + n) % 8] = c->u_mode.ocb.L[0];
-	Ls[(5 + n) % 8] = c->u_mode.ocb.L[1];
-	Ls[(6 + n) % 8] = c->u_mode.ocb.L[0];
+	/* Use u64 to store pointers for x32 support (assembly function
+	 * assumes 64-bit pointers). */
+	Ls[(0 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(1 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	Ls[(2 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(3 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[2];
+	Ls[(4 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
+	Ls[(5 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1];
+	Ls[(6 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0];
 	l = &Ls[(7 + n) % 8];
 
 	/* Process data in 8 block chunks. */
@@ -1506,7 +1514,7 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
 	  {
 	    /* l_tmp will be used only every 65536-th block. */
 	    blkn += 8;
-	    *l = ocb_get_l(c, l_tmp, blkn - blkn % 8);
+	    *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 8);
 
 	    _gcry_serpent_sse2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
 					c->u_mode.ocb.aad_sum, Ls);
diff --git a/cipher/twofish.c b/cipher/twofish.c
index 7f361c9..f6ecd67 100644
--- a/cipher/twofish.c
+++ b/cipher/twofish.c
@@ -734,15 +734,15 @@ extern void _gcry_twofish_amd64_cfb_dec(const TWOFISH_context *c, byte *out,
 
 extern void _gcry_twofish_amd64_ocb_enc(const TWOFISH_context *ctx, byte *out,
 					const byte *in, byte *offset,
-					byte *checksum, const void *Ls[3]);
+					byte *checksum, const u64 Ls[3]);
 
 extern void _gcry_twofish_amd64_ocb_dec(const TWOFISH_context *ctx, byte *out,
 					const byte *in, byte *offset,
-					byte *checksum, const void *Ls[3]);
+					byte *checksum, const u64 Ls[3]);
 
 extern void _gcry_twofish_amd64_ocb_auth(const TWOFISH_context *ctx,
 					 const byte *abuf, byte *offset,
-					 byte *checksum, const void *Ls[3]);
+					 byte *checksum, const u64 Ls[3]);
 
 #ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
 static inline void
@@ -854,7 +854,7 @@ twofish_amd64_cfb_dec(const TWOFISH_context *c, byte *out, const byte *in,
 
 static inline void
 twofish_amd64_ocb_enc(const TWOFISH_context *ctx, byte *out, const byte *in,
-		      byte *offset, byte *checksum, const void *Ls[3])
+		      byte *offset, byte *checksum, const u64 Ls[3])
 {
 #ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
   call_sysv_fn6(_gcry_twofish_amd64_ocb_enc, ctx, out, in, offset, checksum, Ls);
@@ -865,7 +865,7 @@ twofish_amd64_ocb_enc(const TWOFISH_context *ctx, byte *out, const byte *in,
 
 static inline void
 twofish_amd64_ocb_dec(const TWOFISH_context *ctx, byte *out, const byte *in,
-		      byte *offset, byte *checksum, const void *Ls[3])
+		      byte *offset, byte *checksum, const u64 Ls[3])
 {
 #ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
   call_sysv_fn6(_gcry_twofish_amd64_ocb_dec, ctx, out, in, offset, checksum, Ls);
@@ -876,7 +876,7 @@ twofish_amd64_ocb_dec(const TWOFISH_context *ctx, byte *out, const byte *in,
 
 static inline void
 twofish_amd64_ocb_auth(const TWOFISH_context *ctx, const byte *abuf,
-		       byte *offset, byte *checksum, const void *Ls[3])
+		       byte *offset, byte *checksum, const u64 Ls[3])
 {
 #ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
   call_sysv_fn5(_gcry_twofish_amd64_ocb_auth, ctx, abuf, offset, checksum, Ls);
@@ -1261,15 +1261,17 @@ _gcry_twofish_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
   u64 blkn = c->u_mode.ocb.data_nblocks;
 
   {
-    const void *Ls[3];
+    /* Use u64 to store pointers for x32 support (assembly function
+     * assumes 64-bit pointers). */
+    u64 Ls[3];
 
     /* Process data in 3 block chunks. */
     while (nblocks >= 3)
       {
 	/* l_tmp will be used only every 65536-th block. */
-	Ls[0] = ocb_get_l(c, l_tmp, blkn + 1);
-	Ls[1] = ocb_get_l(c, l_tmp, blkn + 2);
-	Ls[2] = ocb_get_l(c, l_tmp, blkn + 3);
+	Ls[0] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 1);
+	Ls[1] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 2);
+	Ls[2] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 3);
 	blkn += 3;
 
 	if (encrypt)
@@ -1320,15 +1322,17 @@ _gcry_twofish_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
   u64 blkn = c->u_mode.ocb.aad_nblocks;
 
   {
-    const void *Ls[3];
+    /* Use u64 to store pointers for x32 support (assembly function
+     * assumes 64-bit pointers). */
+    u64 Ls[3];
 
     /* Process data in 3 block chunks. */
     while (nblocks >= 3)
       {
 	/* l_tmp will be used only every 65536-th block. */
-	Ls[0] = ocb_get_l(c, l_tmp, blkn + 1);
-	Ls[1] = ocb_get_l(c, l_tmp, blkn + 2);
-	Ls[2] = ocb_get_l(c, l_tmp, blkn + 3);
+	Ls[0] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 1);
+	Ls[1] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 2);
+	Ls[2] = (uintptr_t)(const void *)ocb_get_l(c, l_tmp, blkn + 3);
 	blkn += 3;
 
 	twofish_amd64_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
