[git] GCRYPT - branch, master, updated. libgcrypt-1.6.0-144-gc2e1f8f

by Jussi Kivilinna cvs at cvs.gnupg.org
Thu Jan 1 19:35:16 CET 2015


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "The GNU crypto library".

The branch, master, has been updated
       via  c2e1f8fea271f3ef8027809547c4a52e0b1e24a2 (commit)
       via  1dab4c9422bf0f3cdc7a4d3ccf9db090abd90e94 (commit)
       via  99faf9cb34f872144313403f29f3379798debfc9 (commit)
       via  4515315f61fbf79413e150fbd1d5f5a2435f2bc5 (commit)
       via  cc26106dbebeb84d481661813edc3e5aea9a7d99 (commit)
      from  520070e02e2e6ee7228945015573a6e1f4895ec3 (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list them
in full below.

- Log -----------------------------------------------------------------
commit c2e1f8fea271f3ef8027809547c4a52e0b1e24a2
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue Dec 23 13:33:12 2014 +0200

    random-csprng: fix compiler warnings on ARM
    
    * random/random-csprng.c (_gcry_rngcsprng_update_seed_file)
    (read_pool): Cast keypool and rndpool to 'unsigned long *' through
    'void *'.
    --
    
    Patch fixes 'cast increases required alignment' warnings seen with GCC:
    
    random-csprng.c: In function '_gcry_rngcsprng_update_seed_file':
    random-csprng.c:867:15: warning: cast increases required alignment of target type [-Wcast-align]
       for (i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
                   ^
    random-csprng.c:867:43: warning: cast increases required alignment of target type [-Wcast-align]
       for (i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
                                               ^
    random-csprng.c: In function 'read_pool':
    random-csprng.c:1023:14: warning: cast increases required alignment of target type [-Wcast-align]
       for(i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
                  ^
    random-csprng.c:1023:42: warning: cast increases required alignment of target type [-Wcast-align]
       for(i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
                                              ^
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
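
To see why the 'void *' detour helps: GCC's -Wcast-align fires when a pointer
is cast to a type with stricter alignment, but it does not track casts from
'void *'. A minimal standalone sketch of both forms (the buffer name is
hypothetical, not from the libgcrypt sources; the intermediate cast only
silences the diagnostic, so the underlying buffer must still be word-aligned,
as the patch assumes for rndpool and keypool):

    /* Compile with e.g. arm-linux-gnueabi-gcc -Wcast-align -c demo.c */
    static unsigned char pool[64];          /* hypothetical byte pool */

    unsigned long first_word_direct (void)
    {
      /* warning: cast increases required alignment of target type */
      return *(unsigned long *) pool;
    }

    unsigned long first_word_quiet (void)
    {
      /* no warning: -Wcast-align does not flag casts from 'void *' */
      return *(unsigned long *) (void *) pool;
    }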

diff --git a/random/random-csprng.c b/random/random-csprng.c
index 429c84f..332744b 100644
--- a/random/random-csprng.c
+++ b/random/random-csprng.c
@@ -864,7 +864,7 @@ _gcry_rngcsprng_update_seed_file (void)
 
 
   /* Copy the entropy pool to a scratch pool and mix both of them. */
-  for (i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
+  for (i=0,dp=(unsigned long*)(void*)keypool, sp=(unsigned long*)(void*)rndpool;
        i < POOLWORDS; i++, dp++, sp++ )
     {
       *dp = *sp + ADD_VALUE;
@@ -1020,7 +1020,7 @@ read_pool (byte *buffer, size_t length, int level)
     }
 
   /* Create a new pool. */
-  for(i=0,dp=(unsigned long*)keypool, sp=(unsigned long*)rndpool;
+  for(i=0,dp=(unsigned long*)(void*)keypool, sp=(unsigned long*)(void*)rndpool;
       i < POOLWORDS; i++, dp++, sp++ )
     *dp = *sp + ADD_VALUE;
 

commit 1dab4c9422bf0f3cdc7a4d3ccf9db090abd90e94
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue Dec 23 13:31:58 2014 +0200

    scrypt: fix compiler warnings on ARM
    
    * cipher/scrypt.c (_scryptBlockMix): Cast X to 'u32 *' through 'void *'.
    --
    
    Patch fixes 'cast increases required alignment' warnings seen with GCC:
    
    scrypt.c: In function '_scryptBlockMix':
    scrypt.c:145:22: warning: cast increases required alignment of target type [-Wcast-align]
           _salsa20_core ((u32*)X, (u32*)X, 8);
                          ^
    scrypt.c:145:31: warning: cast increases required alignment of target type [-Wcast-align]
           _salsa20_core ((u32*)X, (u32*)X, 8);
                                   ^
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
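
Since the cast only suppresses the warning rather than proving alignment, a
debug build can assert the property the cast relies on. A sketch of such a
check (the helper name is made up; X in _scryptBlockMix is taken by the patch
to be suitably aligned in practice):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical debug helper: abort if PTR lacks ALIGN-byte alignment. */
    static void
    check_alignment (const void *ptr, uintptr_t align)
    {
      assert (((uintptr_t) ptr % align) == 0);
    }

    /* Possible use just before the silenced cast:
         check_alignment (X, sizeof (u32));
         _salsa20_core ((u32 *) (void *) X, (u32 *) (void *) X, 8);   */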

diff --git a/cipher/scrypt.c b/cipher/scrypt.c
index 404943d..aca903d 100644
--- a/cipher/scrypt.c
+++ b/cipher/scrypt.c
@@ -142,7 +142,7 @@ _scryptBlockMix (u32 r, unsigned char *B, unsigned char *tmp2)
       buf_xor(X, X, &B[i * 64], 64);
 
       /* X = Salsa (T) */
-      _salsa20_core ((u32*)X, (u32*)X, 8);
+      _salsa20_core ((u32*)(void*)X, (u32*)(void*)X, 8);
 
       /* Y[i] = X */
       memcpy (&Y[i * 64], X, 64);

commit 99faf9cb34f872144313403f29f3379798debfc9
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue Dec 23 13:31:09 2014 +0200

    secmem: fix compiler warnings on ARM
    
    * src/secmem.c (ADDR_TO_BLOCK, mb_get_next, mb_get_new): Cast pointer
    from 'char *' to 'memblock_t *' through 'void *'.
    (MB_WIPE_OUT): Remove unneeded cast to 'memblock_t *'.
    --
    
    Patch fixes 'cast increases required alignment' warnings seen with GCC:
    
    secmem.c: In function 'mb_get_next':
    secmem.c:140:13: warning: cast increases required alignment of target type [-Wcast-align]
       mb_next = (memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);
                 ^
    secmem.c: In function 'mb_get_new':
    secmem.c:208:17: warning: cast increases required alignment of target type [-Wcast-align]
          mb_split = (memblock_t *) (((char *) mb) + BLOCK_HEAD_SIZE + size);
                     ^
    secmem.c: In function '_gcry_secmem_free_internal':
    secmem.c:101:3: warning: cast increases required alignment of target type [-Wcast-align]
       (memblock_t *) ((char *) addr - BLOCK_HEAD_SIZE)
       ^
    secmem.c:603:8: note: in expansion of macro 'ADDR_TO_BLOCK'
       mb = ADDR_TO_BLOCK (a);
            ^
    In file included from secmem.c:40:0:
    secmem.c:609:16: warning: cast increases required alignment of target type [-Wcast-align]
       wipememory2 ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
                    ^
    g10lib.h:309:54: note: in definition of macro 'wipememory2'
                   volatile char *_vptr=(volatile char *)(_ptr); \
                                                          ^
    secmem.c:611:3: note: in expansion of macro 'MB_WIPE_OUT'
       MB_WIPE_OUT (0xff);
       ^
    secmem.c:609:16: warning: cast increases required alignment of target type [-Wcast-align]
       wipememory2 ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
                    ^
    g10lib.h:309:54: note: in definition of macro 'wipememory2'
                   volatile char *_vptr=(volatile char *)(_ptr); \
                                                          ^
    secmem.c:612:3: note: in expansion of macro 'MB_WIPE_OUT'
       MB_WIPE_OUT (0xaa);
       ^
    secmem.c:609:16: warning: cast increases required alignment of target type [-Wcast-align]
       wipememory2 ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
                    ^
    g10lib.h:309:54: note: in definition of macro 'wipememory2'
                   volatile char *_vptr=(volatile char *)(_ptr); \
                                                          ^
    secmem.c:613:3: note: in expansion of macro 'MB_WIPE_OUT'
       MB_WIPE_OUT (0x55);
       ^
    secmem.c:609:16: warning: cast increases required alignment of target type [-Wcast-align]
       wipememory2 ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
                    ^
    g10lib.h:309:54: note: in definition of macro 'wipememory2'
                   volatile char *_vptr=(volatile char *)(_ptr); \
                                                          ^
    secmem.c:614:3: note: in expansion of macro 'MB_WIPE_OUT'
       MB_WIPE_OUT (0x00);
       ^
    secmem.c: In function '_gcry_secmem_realloc':
    secmem.c:644:8: warning: cast increases required alignment of target type [-Wcast-align]
       mb = (memblock_t *) ((char *) p - ((size_t) &((memblock_t *) 0)->aligned.c));
            ^
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
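
The casts in secmem.c all implement the same header-before-payload pattern:
each allocation is preceded by a memblock_t header, and the block pointer is
recovered by stepping back over the header (the '&((memblock_t *) 0)->aligned.c'
expression in the realloc path is a hand-rolled offsetof). A self-contained
sketch of the idiom, with illustrative names rather than the actual libgcrypt
definitions:

    #include <stddef.h>

    typedef struct block_s
    {
      size_t size;                 /* illustrative header field */
      unsigned char payload[1];    /* user-visible memory starts here */
    } block_t;

    #define HEAD_SIZE offsetof (block_t, payload)

    /* Recover the header from a user pointer; the 'void *' detour keeps
       -Wcast-align quiet exactly as in the ADDR_TO_BLOCK change below.  */
    static block_t *
    addr_to_block (void *addr)
    {
      return (block_t *) (void *) ((char *) addr - HEAD_SIZE);
    }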

diff --git a/src/secmem.c b/src/secmem.c
index cfea921..df15df0 100644
--- a/src/secmem.c
+++ b/src/secmem.c
@@ -98,7 +98,7 @@ GPGRT_LOCK_DEFINE (secmem_lock);
 
 /* Convert an address into the according memory block structure.  */
 #define ADDR_TO_BLOCK(addr) \
-  (memblock_t *) ((char *) addr - BLOCK_HEAD_SIZE)
+  (memblock_t *) (void *) ((char *) addr - BLOCK_HEAD_SIZE)
 
 /* Check whether P points into the pool.  */
 static int
@@ -137,7 +137,7 @@ mb_get_next (memblock_t *mb)
 {
   memblock_t *mb_next;
 
-  mb_next = (memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);
+  mb_next = (memblock_t *) (void *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);
 
   if (! ptr_into_pool_p (mb_next))
     mb_next = NULL;
@@ -205,7 +205,8 @@ mb_get_new (memblock_t *block, size_t size)
 	  {
 	    /* Split block.  */
 
-	    mb_split = (memblock_t *) (((char *) mb) + BLOCK_HEAD_SIZE + size);
+	    mb_split = (memblock_t *) (void *) (((char *) mb) + BLOCK_HEAD_SIZE
+						+ size);
 	    mb_split->size = mb->size - size - BLOCK_HEAD_SIZE;
 	    mb_split->flags = 0;
 
@@ -606,7 +607,7 @@ _gcry_secmem_free_internal (void *a)
   /* This does not make much sense: probably this memory is held in the
    * cache. We do it anyway: */
 #define MB_WIPE_OUT(byte) \
-  wipememory2 ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
+  wipememory2 (((char *) mb + BLOCK_HEAD_SIZE), (byte), size);
 
   MB_WIPE_OUT (0xff);
   MB_WIPE_OUT (0xaa);
@@ -641,7 +642,8 @@ _gcry_secmem_realloc (void *p, size_t newsize)
 
   SECMEM_LOCK;
 
-  mb = (memblock_t *) ((char *) p - ((size_t) &((memblock_t *) 0)->aligned.c));
+  mb = (memblock_t *) (void *) ((char *) p
+				- ((size_t) &((memblock_t *) 0)->aligned.c));
   size = mb->size;
   if (newsize < size)
     {

commit 4515315f61fbf79413e150fbd1d5f5a2435f2bc5
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue Dec 23 13:01:33 2014 +0200

    hash: fix compiler warning on ARM
    
    * cipher/md.c (md_open, md_copy): Cast 'char *' to ctx through
    'void *'.
    * cipher/md4.c (md4_final): Use buf_put_* helper instead of
    converting 'char *' to 'u32 *'.
    * cipher/md5.c (md5_final): Ditto.
    * cipher/rmd160.c (_gcry_rmd160_mixblock, rmd160_final): Ditto.
    * cipher/sha1.c (sha1_final): Ditto.
    * cipher/sha256.c (sha256_final): Ditto.
    * cipher/sha512.c (sha512_final): Ditto.
    * cipher/tiger.c (tiger_final): Ditto.
    --
    
    Patch fixes 'cast increases required alignment' warnings seen with GCC:
    
    md.c: In function 'md_open':
    md.c:318:23: warning: cast increases required alignment of target type [-Wcast-align]
           hd->ctx = ctx = (struct gcry_md_context *) ((char *) hd + n);
                           ^
    md.c: In function 'md_copy':
    md.c:491:22: warning: cast increases required alignment of target type [-Wcast-align]
           bhd->ctx = b = (struct gcry_md_context *) ((char *) bhd + n);
                          ^
    md4.c: In function 'md4_final':
    md4.c:258:20: warning: cast increases required alignment of target type [-Wcast-align]
     #define X(a) do { *(u32*)p = le_bswap32((*hd).a) ; p += 4; } while(0)
                        ^
    md4.c:259:3: note: in expansion of macro 'X'
       X(A);
       ^
    md4.c:258:20: warning: cast increases required alignment of target type [-Wcast-align]
     #define X(a) do { *(u32*)p = le_bswap32((*hd).a) ; p += 4; } while(0)
                        ^
    md4.c:260:3: note: in expansion of macro 'X'
       X(B);
       ^
    [removed the rest]
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
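
The buf_put_* helpers used as replacements live in cipher/bufhelp.h.
Conceptually they store the value byte by byte, so the destination needs no
particular alignment and no pointer cast is involved; the real header also has
fast paths for targets that tolerate unaligned access. A minimal byte-wise
equivalent, as a sketch only:

    #include <stdint.h>

    /* Big-endian store, valid for any destination alignment. */
    static void
    put_be32 (unsigned char *p, uint32_t v)
    {
      p[0] = (unsigned char) (v >> 24);
      p[1] = (unsigned char) (v >> 16);
      p[2] = (unsigned char) (v >>  8);
      p[3] = (unsigned char) (v >>  0);
    }

    /* Little-endian counterpart, as the MD4/MD5/RIPEMD-160 paths need. */
    static void
    put_le32 (unsigned char *p, uint32_t v)
    {
      p[0] = (unsigned char) (v >>  0);
      p[1] = (unsigned char) (v >>  8);
      p[2] = (unsigned char) (v >> 16);
      p[3] = (unsigned char) (v >> 24);
    }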

diff --git a/cipher/md.c b/cipher/md.c
index df8b027..f9414de 100644
--- a/cipher/md.c
+++ b/cipher/md.c
@@ -315,7 +315,7 @@ md_open (gcry_md_hd_t *h, int algo, unsigned int flags)
 
   if (! err)
     {
-      hd->ctx = ctx = (struct gcry_md_context *) ((char *) hd + n);
+      hd->ctx = ctx = (void *) ((char *) hd + n);
       /* Setup the globally visible data (bctl in the diagram).*/
       hd->bufsize = n - sizeof (struct gcry_md_handle) + 1;
       hd->bufpos = 0;
@@ -488,7 +488,7 @@ md_copy (gcry_md_hd_t ahd, gcry_md_hd_t *b_hd)
 
   if (! err)
     {
-      bhd->ctx = b = (struct gcry_md_context *) ((char *) bhd + n);
+      bhd->ctx = b = (void *) ((char *) bhd + n);
       /* No need to copy the buffer due to the write above. */
       gcry_assert (ahd->bufsize == (n - sizeof (struct gcry_md_handle) + 1));
       bhd->bufsize = ahd->bufsize;
diff --git a/cipher/md4.c b/cipher/md4.c
index 7291254..c9b4154 100644
--- a/cipher/md4.c
+++ b/cipher/md4.c
@@ -255,7 +255,7 @@ md4_final( void *context )
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u32*)p = le_bswap32((*hd).a) ; p += 4; } while(0)
+#define X(a) do { buf_put_le32(p, hd->a); p += 4; } while(0)
   X(A);
   X(B);
   X(C);
diff --git a/cipher/md5.c b/cipher/md5.c
index 73ad968..f17af7a 100644
--- a/cipher/md5.c
+++ b/cipher/md5.c
@@ -279,7 +279,7 @@ md5_final( void *context)
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u32*)p = le_bswap32((*hd).a) ; p += 4; } while(0)
+#define X(a) do { buf_put_le32(p, hd->a); p += 4; } while(0)
   X(A);
   X(B);
   X(C);
diff --git a/cipher/rmd160.c b/cipher/rmd160.c
index e6d02f5..2b1f321 100644
--- a/cipher/rmd160.c
+++ b/cipher/rmd160.c
@@ -411,7 +411,7 @@ _gcry_rmd160_mixblock ( RMD160_CONTEXT *hd, void *blockof64byte )
   char *p = blockof64byte;
 
   transform ( hd, blockof64byte, 1 );
-#define X(a) do { *(u32*)p = hd->h##a ; p += 4; } while(0)
+#define X(a) do { buf_put_le32(p, hd->h##a); p += 4; } while(0)
   X(0);
   X(1);
   X(2);
@@ -474,7 +474,7 @@ rmd160_final( void *context )
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u32*)p = le_bswap32(hd->h##a) ; p += 4; } while(0)
+#define X(a) do { buf_put_le32(p, hd->h##a); p += 4; } while(0)
   X(0);
   X(1);
   X(2);
diff --git a/cipher/sha1.c b/cipher/sha1.c
index 00c57dd..6ccf0e8 100644
--- a/cipher/sha1.c
+++ b/cipher/sha1.c
@@ -401,7 +401,7 @@ sha1_final(void *context)
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u32*)p = be_bswap32(hd->h##a) ; p += 4; } while(0)
+#define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0)
   X(0);
   X(1);
   X(2);
diff --git a/cipher/sha256.c b/cipher/sha256.c
index 4efaec6..d3af172 100644
--- a/cipher/sha256.c
+++ b/cipher/sha256.c
@@ -428,7 +428,7 @@ sha256_final(void *context)
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u32*)p = be_bswap32(hd->h##a); p += 4; } while(0)
+#define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0)
   X(0);
   X(1);
   X(2);
diff --git a/cipher/sha512.c b/cipher/sha512.c
index 7d60df0..5a6af80 100644
--- a/cipher/sha512.c
+++ b/cipher/sha512.c
@@ -669,7 +669,7 @@ sha512_final (void *context)
   _gcry_burn_stack (stack_burn_depth);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u64*)p = be_bswap64(hd->state.h##a) ; p += 8; } while (0)
+#define X(a) do { buf_put_be64(p, hd->state.h##a); p += 8; } while (0)
   X (0);
   X (1);
   X (2);
diff --git a/cipher/tiger.c b/cipher/tiger.c
index 91db4e6..8a08953 100644
--- a/cipher/tiger.c
+++ b/cipher/tiger.c
@@ -805,8 +805,8 @@ tiger_final( void *context )
   _gcry_burn_stack (burn);
 
   p = hd->bctx.buf;
-#define X(a) do { *(u64*)p = be_bswap64(hd->a); p += 8; } while(0)
-#define Y(a) do { *(u64*)p = le_bswap64(hd->a); p += 8; } while(0)
+#define X(a) do { buf_put_be64(p, hd->a); p += 8; } while(0)
+#define Y(a) do { buf_put_le64(p, hd->a); p += 8; } while(0)
   if (hd->variant == 0)
     {
       X(a);

commit cc26106dbebeb84d481661813edc3e5aea9a7d99
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue Dec 23 12:13:50 2014 +0200

    rijndael: fix compiler warnings on ARM
    
    * cipher/rijndael-internal.h (RIJNDAEL_context_s): Add u32 variants of
    keyschedule arrays to unions u1 and u2.
    (keyschedenc32, keyscheddec32): New.
    * cipher/rijndael.c (u32_a_t): Remove.
    (do_setkey): Add and use tkk[].data32, k_u32, tk_u32 and W_u32; Remove
    casting byte arrays to u32_a_t.
    (prepare_decryption, do_encrypt_fn, do_decrypt_fn): Use keyschedenc32
    and keyscheddec32; Remove casting byte arrays to u32_a_t.
    --
    
    Patch fixes 'cast increases required alignment' warnings seen with GCC:
    
    rijndael.c: In function 'do_setkey':
    rijndael.c:310:13: warning: cast increases required alignment of target type [-Wcast-align]
               *((u32_a_t*)tk[j]) = *((u32_a_t*)k[j]);
                 ^
    rijndael.c:310:34: warning: cast increases required alignment of target type [-Wcast-align]
               *((u32_a_t*)tk[j]) = *((u32_a_t*)k[j]);
    [removed the rest]
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
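
Unlike the earlier patches, this one removes the casts altogether: the byte
view and the u32 view of the key schedule now share a union, so the compiler
guarantees alignment of the wider member, and type punning through a union is
well defined in GNU C. A reduced sketch of the pattern, with illustrative
names:

    #include <stdint.h>

    #define ROUNDS 14                       /* matches MAXROUNDS for AES-256 */

    typedef union
    {
      uint64_t dummy;                       /* forces suitable alignment */
      unsigned char b[ROUNDS + 1][4][4];    /* byte view */
      uint32_t      w[ROUNDS + 1][4];       /* word view, same storage */
    } keysched_t;

    /* Word access needs no cast and no alignment fixup. */
    static uint32_t
    round_key_word (const keysched_t *ks, int r, int t)
    {
      return ks->w[r][t];
    }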

diff --git a/cipher/rijndael-internal.h b/cipher/rijndael-internal.h
index 7bc3790..7ff8660 100644
--- a/cipher/rijndael-internal.h
+++ b/cipher/rijndael-internal.h
@@ -95,6 +95,7 @@ typedef struct RIJNDAEL_context_s
   {
     PROPERLY_ALIGNED_TYPE dummy;
     byte keyschedule[MAXROUNDS+1][4][4];
+    u32 keyschedule32[MAXROUNDS+1][4];
 #ifdef USE_PADLOCK
     /* The key as passed to the padlock engine.  It is only used if
        the padlock engine is used (USE_PADLOCK, below).  */
@@ -105,6 +106,7 @@ typedef struct RIJNDAEL_context_s
   {
     PROPERLY_ALIGNED_TYPE dummy;
     byte keyschedule[MAXROUNDS+1][4][4];
+    u32 keyschedule32[MAXROUNDS+1][4];
   } u2;
   int rounds;                         /* Key-length-dependent number of rounds.  */
   unsigned int decryption_prepared:1; /* The decryption key schedule is available.  */
@@ -121,8 +123,10 @@ typedef struct RIJNDAEL_context_s
 } RIJNDAEL_context ATTR_ALIGNED_16;
 
 /* Macros defining alias for the keyschedules.  */
-#define keyschenc  u1.keyschedule
-#define keyschdec  u2.keyschedule
-#define padlockkey u1.padlock_key
+#define keyschenc   u1.keyschedule
+#define keyschenc32 u1.keyschedule32
+#define keyschdec   u2.keyschedule
+#define keyschdec32 u2.keyschedule32
+#define padlockkey  u1.padlock_key
 
 #endif /* G10_RIJNDAEL_INTERNAL_H */
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 5b0fe1c..7a83718 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -50,14 +50,6 @@
 #include "rijndael-internal.h"
 
 
-/* Define an u32 variant for the sake of gcc 4.4's strict aliasing.  */
-#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )
-typedef u32           __attribute__ ((__may_alias__)) u32_a_t;
-#else
-typedef u32           u32_a_t;
-#endif
-
-
 #ifdef USE_AMD64_ASM
 /* AMD64 assembly implementations of AES */
 extern unsigned int _gcry_aes_amd64_encrypt_block(const void *keysched_enc,
@@ -293,10 +285,14 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
         {
           PROPERLY_ALIGNED_TYPE dummy;
           byte data[MAXKC][4];
+          u32 data32[MAXKC];
         } tkk[2];
-#define k tkk[0].data
-#define tk tkk[1].data
-#define W (ctx->keyschenc)
+#define k      tkk[0].data
+#define k_u32  tkk[0].data32
+#define tk     tkk[1].data
+#define tk_u32 tkk[1].data32
+#define W      (ctx->keyschenc)
+#define W_u32  (ctx->keyschenc32)
 
       prefetch_enc();
 
@@ -307,7 +303,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
 
       for (j = KC-1; j >= 0; j--)
         {
-          *((u32_a_t*)tk[j]) = *((u32_a_t*)k[j]);
+          tk_u32[j] = k_u32[j];
         }
       r = 0;
       t = 0;
@@ -316,7 +312,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
         {
           for (; (j < KC) && (t < 4); j++, t++)
             {
-              *((u32_a_t*)W[r][t]) = le_bswap32(*((u32_a_t*)tk[j]));
+              W_u32[r][t] = le_bswap32(tk_u32[j]);
             }
           if (t == 4)
             {
@@ -339,14 +335,14 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
             {
               for (j = 1; j < KC; j++)
                 {
-                  *((u32_a_t*)tk[j]) ^= *((u32_a_t*)tk[j-1]);
+                  tk_u32[j] ^= tk_u32[j-1];
                 }
             }
           else
             {
               for (j = 1; j < KC/2; j++)
                 {
-                  *((u32_a_t*)tk[j]) ^= *((u32_a_t*)tk[j-1]);
+                  tk_u32[j] ^= tk_u32[j-1];
                 }
               tk[KC/2][0] ^= sbox[tk[KC/2 - 1][0] * 4];
               tk[KC/2][1] ^= sbox[tk[KC/2 - 1][1] * 4];
@@ -354,7 +350,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
               tk[KC/2][3] ^= sbox[tk[KC/2 - 1][3] * 4];
               for (j = KC/2 + 1; j < KC; j++)
                 {
-                  *((u32_a_t*)tk[j]) ^= *((u32_a_t*)tk[j-1]);
+                  tk_u32[j] ^= tk_u32[j-1];
                 }
             }
 
@@ -363,7 +359,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
             {
               for (; (j < KC) && (t < 4); j++, t++)
                 {
-                  *((u32_a_t*)W[r][t]) = le_bswap32(*((u32_a_t*)tk[j]));
+                  W_u32[r][t] = le_bswap32(tk_u32[j]);
                 }
               if (t == 4)
                 {
@@ -375,6 +371,9 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
 #undef W
 #undef tk
 #undef k
+#undef W_u32
+#undef tk_u32
+#undef k_u32
       wipememory(&tkk, sizeof(tkk));
     }
 
@@ -417,15 +416,15 @@ prepare_decryption( RIJNDAEL_context *ctx )
       prefetch_enc();
       prefetch_dec();
 
-      *((u32_a_t*)ctx->keyschdec[0][0]) = *((u32_a_t*)ctx->keyschenc[0][0]);
-      *((u32_a_t*)ctx->keyschdec[0][1]) = *((u32_a_t*)ctx->keyschenc[0][1]);
-      *((u32_a_t*)ctx->keyschdec[0][2]) = *((u32_a_t*)ctx->keyschenc[0][2]);
-      *((u32_a_t*)ctx->keyschdec[0][3]) = *((u32_a_t*)ctx->keyschenc[0][3]);
+      ctx->keyschdec32[0][0] = ctx->keyschenc32[0][0];
+      ctx->keyschdec32[0][1] = ctx->keyschenc32[0][1];
+      ctx->keyschdec32[0][2] = ctx->keyschenc32[0][2];
+      ctx->keyschdec32[0][3] = ctx->keyschenc32[0][3];
 
       for (r = 1; r < ctx->rounds; r++)
         {
-          u32_a_t *wi = (u32_a_t*)((ctx->keyschenc)[r]);
-          u32_a_t *wo = (u32_a_t*)((ctx->keyschdec)[r]);
+          u32 *wi = ctx->keyschenc32[r];
+          u32 *wo = ctx->keyschdec32[r];
           u32 wt;
 
           wt = wi[0];
@@ -453,10 +452,10 @@ prepare_decryption( RIJNDAEL_context *ctx )
                  ^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3);
         }
 
-      *((u32_a_t*)ctx->keyschdec[r][0]) = *((u32_a_t*)ctx->keyschenc[r][0]);
-      *((u32_a_t*)ctx->keyschdec[r][1]) = *((u32_a_t*)ctx->keyschenc[r][1]);
-      *((u32_a_t*)ctx->keyschdec[r][2]) = *((u32_a_t*)ctx->keyschenc[r][2]);
-      *((u32_a_t*)ctx->keyschdec[r][3]) = *((u32_a_t*)ctx->keyschenc[r][3]);
+      ctx->keyschdec32[r][0] = ctx->keyschenc32[r][0];
+      ctx->keyschdec32[r][1] = ctx->keyschenc32[r][1];
+      ctx->keyschdec32[r][2] = ctx->keyschenc32[r][2];
+      ctx->keyschdec32[r][3] = ctx->keyschenc32[r][3];
     }
 }
 
@@ -467,7 +466,7 @@ static unsigned int
 do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
                const unsigned char *a)
 {
-#define rk (ctx->keyschenc)
+#define rk (ctx->keyschenc32)
   const byte *sbox = ((const byte *)encT) + 1;
   int rounds = ctx->rounds;
   int r;
@@ -479,34 +478,34 @@ do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
   sb[2] = buf_get_le32(a + 8);
   sb[3] = buf_get_le32(a + 12);
 
-  sa[0] = sb[0] ^ *((u32_a_t*)rk[0][0]);
-  sa[1] = sb[1] ^ *((u32_a_t*)rk[0][1]);
-  sa[2] = sb[2] ^ *((u32_a_t*)rk[0][2]);
-  sa[3] = sb[3] ^ *((u32_a_t*)rk[0][3]);
+  sa[0] = sb[0] ^ rk[0][0];
+  sa[1] = sb[1] ^ rk[0][1];
+  sa[2] = sb[2] ^ rk[0][2];
+  sa[3] = sb[3] ^ rk[0][3];
 
   sb[0] = rol(encT[(byte)(sa[0] >> (0 * 8))], (0 * 8));
   sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
   sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
   sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-  sa[0] = *((u32_a_t*)rk[1][0]) ^ sb[0];
+  sa[0] = rk[1][0] ^ sb[0];
 
   sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
   sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
   sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
   sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-  sa[1] = *((u32_a_t*)rk[1][1]) ^ sb[1];
+  sa[1] = rk[1][1] ^ sb[1];
 
   sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
   sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
   sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
   sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-  sa[2] = *((u32_a_t*)rk[1][2]) ^ sb[2];
+  sa[2] = rk[1][2] ^ sb[2];
 
   sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
   sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
   sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
   sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-  sa[3] = *((u32_a_t*)rk[1][3]) ^ sb[3];
+  sa[3] = rk[1][3] ^ sb[3];
 
   for (r = 2; r < rounds; r++)
     {
@@ -514,25 +513,25 @@ do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
       sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
       sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
       sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-      sa[0] = *((u32_a_t*)rk[r][0]) ^ sb[0];
+      sa[0] = rk[r][0] ^ sb[0];
 
       sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
       sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
       sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
       sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-      sa[1] = *((u32_a_t*)rk[r][1]) ^ sb[1];
+      sa[1] = rk[r][1] ^ sb[1];
 
       sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
       sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
       sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
       sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-      sa[2] = *((u32_a_t*)rk[r][2]) ^ sb[2];
+      sa[2] = rk[r][2] ^ sb[2];
 
       sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
       sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
       sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
       sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-      sa[3] = *((u32_a_t*)rk[r][3]) ^ sb[3];
+      sa[3] = rk[r][3] ^ sb[3];
 
       r++;
 
@@ -540,25 +539,25 @@ do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
       sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
       sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
       sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-      sa[0] = *((u32_a_t*)rk[r][0]) ^ sb[0];
+      sa[0] = rk[r][0] ^ sb[0];
 
       sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
       sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
       sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
       sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-      sa[1] = *((u32_a_t*)rk[r][1]) ^ sb[1];
+      sa[1] = rk[r][1] ^ sb[1];
 
       sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
       sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
       sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
       sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-      sa[2] = *((u32_a_t*)rk[r][2]) ^ sb[2];
+      sa[2] = rk[r][2] ^ sb[2];
 
       sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
       sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
       sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
       sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-      sa[3] = *((u32_a_t*)rk[r][3]) ^ sb[3];
+      sa[3] = rk[r][3] ^ sb[3];
     }
 
   /* Last round is special. */
@@ -567,25 +566,25 @@ do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
   sb[3] = (sbox[(byte)(sa[0] >> (1 * 8)) * 4]) << (1 * 8);
   sb[2] = (sbox[(byte)(sa[0] >> (2 * 8)) * 4]) << (2 * 8);
   sb[1] = (sbox[(byte)(sa[0] >> (3 * 8)) * 4]) << (3 * 8);
-  sa[0] = *((u32_a_t*)rk[r][0]) ^ sb[0];
+  sa[0] = rk[r][0] ^ sb[0];
 
   sb[1] ^= (sbox[(byte)(sa[1] >> (0 * 8)) * 4]) << (0 * 8);
   sa[0] ^= (sbox[(byte)(sa[1] >> (1 * 8)) * 4]) << (1 * 8);
   sb[3] ^= (sbox[(byte)(sa[1] >> (2 * 8)) * 4]) << (2 * 8);
   sb[2] ^= (sbox[(byte)(sa[1] >> (3 * 8)) * 4]) << (3 * 8);
-  sa[1] = *((u32_a_t*)rk[r][1]) ^ sb[1];
+  sa[1] = rk[r][1] ^ sb[1];
 
   sb[2] ^= (sbox[(byte)(sa[2] >> (0 * 8)) * 4]) << (0 * 8);
   sa[1] ^= (sbox[(byte)(sa[2] >> (1 * 8)) * 4]) << (1 * 8);
   sa[0] ^= (sbox[(byte)(sa[2] >> (2 * 8)) * 4]) << (2 * 8);
   sb[3] ^= (sbox[(byte)(sa[2] >> (3 * 8)) * 4]) << (3 * 8);
-  sa[2] = *((u32_a_t*)rk[r][2]) ^ sb[2];
+  sa[2] = rk[r][2] ^ sb[2];
 
   sb[3] ^= (sbox[(byte)(sa[3] >> (0 * 8)) * 4]) << (0 * 8);
   sa[2] ^= (sbox[(byte)(sa[3] >> (1 * 8)) * 4]) << (1 * 8);
   sa[1] ^= (sbox[(byte)(sa[3] >> (2 * 8)) * 4]) << (2 * 8);
   sa[0] ^= (sbox[(byte)(sa[3] >> (3 * 8)) * 4]) << (3 * 8);
-  sa[3] = *((u32_a_t*)rk[r][3]) ^ sb[3];
+  sa[3] = rk[r][3] ^ sb[3];
 
   buf_put_le32(b + 0, sa[0]);
   buf_put_le32(b + 4, sa[1]);
@@ -790,7 +789,7 @@ static unsigned int
 do_decrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
                const unsigned char *a)
 {
-#define rk  (ctx->keyschdec)
+#define rk (ctx->keyschdec32)
   int rounds = ctx->rounds;
   int r;
   u32 sa[4];
@@ -801,10 +800,10 @@ do_decrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
   sb[2] = buf_get_le32(a + 8);
   sb[3] = buf_get_le32(a + 12);
 
-  sa[0] = sb[0] ^ *((u32_a_t*)rk[rounds][0]);
-  sa[1] = sb[1] ^ *((u32_a_t*)rk[rounds][1]);
-  sa[2] = sb[2] ^ *((u32_a_t*)rk[rounds][2]);
-  sa[3] = sb[3] ^ *((u32_a_t*)rk[rounds][3]);
+  sa[0] = sb[0] ^ rk[rounds][0];
+  sa[1] = sb[1] ^ rk[rounds][1];
+  sa[2] = sb[2] ^ rk[rounds][2];
+  sa[3] = sb[3] ^ rk[rounds][3];
 
   for (r = rounds - 1; r > 1; r--)
     {
@@ -812,25 +811,25 @@ do_decrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
       sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
       sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
       sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-      sa[0] = *((u32_a_t*)rk[r][0]) ^ sb[0];
+      sa[0] = rk[r][0] ^ sb[0];
 
       sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
       sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
       sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
       sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-      sa[1] = *((u32_a_t*)rk[r][1]) ^ sb[1];
+      sa[1] = rk[r][1] ^ sb[1];
 
       sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
       sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
       sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
       sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-      sa[2] = *((u32_a_t*)rk[r][2]) ^ sb[2];
+      sa[2] = rk[r][2] ^ sb[2];
 
       sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
       sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
       sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
       sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-      sa[3] = *((u32_a_t*)rk[r][3]) ^ sb[3];
+      sa[3] = rk[r][3] ^ sb[3];
 
       r--;
 
@@ -838,75 +837,75 @@ do_decrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
       sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
       sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
       sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-      sa[0] = *((u32_a_t*)rk[r][0]) ^ sb[0];
+      sa[0] = rk[r][0] ^ sb[0];
 
       sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
       sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
       sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
       sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-      sa[1] = *((u32_a_t*)rk[r][1]) ^ sb[1];
+      sa[1] = rk[r][1] ^ sb[1];
 
       sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
       sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
       sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
       sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-      sa[2] = *((u32_a_t*)rk[r][2]) ^ sb[2];
+      sa[2] = rk[r][2] ^ sb[2];
 
       sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
       sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
       sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
       sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-      sa[3] = *((u32_a_t*)rk[r][3]) ^ sb[3];
+      sa[3] = rk[r][3] ^ sb[3];
     }
 
   sb[0] = rol(decT[(byte)(sa[0] >> (0 * 8))], (0 * 8));
   sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
   sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
   sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
-  sa[0] = *((u32_a_t*)rk[1][0]) ^ sb[0];
+  sa[0] = rk[1][0] ^ sb[0];
 
   sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
   sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
   sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
   sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
-  sa[1] = *((u32_a_t*)rk[1][1]) ^ sb[1];
+  sa[1] = rk[1][1] ^ sb[1];
 
   sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
   sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
   sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
   sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
-  sa[2] = *((u32_a_t*)rk[1][2]) ^ sb[2];
+  sa[2] = rk[1][2] ^ sb[2];
 
   sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
   sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
   sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
   sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
-  sa[3] = *((u32_a_t*)rk[1][3]) ^ sb[3];
+  sa[3] = rk[1][3] ^ sb[3];
 
   /* Last round is special. */
   sb[0] = inv_sbox[(byte)(sa[0] >> (0 * 8))] << (0 * 8);
   sb[1] = inv_sbox[(byte)(sa[0] >> (1 * 8))] << (1 * 8);
   sb[2] = inv_sbox[(byte)(sa[0] >> (2 * 8))] << (2 * 8);
   sb[3] = inv_sbox[(byte)(sa[0] >> (3 * 8))] << (3 * 8);
-  sa[0] = sb[0] ^ *((u32_a_t*)rk[0][0]);
+  sa[0] = sb[0] ^ rk[0][0];
 
   sb[1] ^= inv_sbox[(byte)(sa[1] >> (0 * 8))] << (0 * 8);
   sb[2] ^= inv_sbox[(byte)(sa[1] >> (1 * 8))] << (1 * 8);
   sb[3] ^= inv_sbox[(byte)(sa[1] >> (2 * 8))] << (2 * 8);
   sa[0] ^= inv_sbox[(byte)(sa[1] >> (3 * 8))] << (3 * 8);
-  sa[1] = sb[1] ^ *((u32_a_t*)rk[0][1]);
+  sa[1] = sb[1] ^ rk[0][1];
 
   sb[2] ^= inv_sbox[(byte)(sa[2] >> (0 * 8))] << (0 * 8);
   sb[3] ^= inv_sbox[(byte)(sa[2] >> (1 * 8))] << (1 * 8);
   sa[0] ^= inv_sbox[(byte)(sa[2] >> (2 * 8))] << (2 * 8);
   sa[1] ^= inv_sbox[(byte)(sa[2] >> (3 * 8))] << (3 * 8);
-  sa[2] = sb[2] ^ *((u32_a_t*)rk[0][2]);
+  sa[2] = sb[2] ^ rk[0][2];
 
   sb[3] ^= inv_sbox[(byte)(sa[3] >> (0 * 8))] << (0 * 8);
   sa[0] ^= inv_sbox[(byte)(sa[3] >> (1 * 8))] << (1 * 8);
   sa[1] ^= inv_sbox[(byte)(sa[3] >> (2 * 8))] << (2 * 8);
   sa[2] ^= inv_sbox[(byte)(sa[3] >> (3 * 8))] << (3 * 8);
-  sa[3] = sb[3] ^ *((u32_a_t*)rk[0][3]);
+  sa[3] = sb[3] ^ rk[0][3];
 
   buf_put_le32(b + 0, sa[0]);
   buf_put_le32(b + 4, sa[1]);

-----------------------------------------------------------------------

Summary of changes:
 cipher/md.c                |    4 +-
 cipher/md4.c               |    2 +-
 cipher/md5.c               |    2 +-
 cipher/rijndael-internal.h |   10 +++-
 cipher/rijndael.c          |  137 ++++++++++++++++++++++----------------------
 cipher/rmd160.c            |    4 +-
 cipher/scrypt.c            |    2 +-
 cipher/sha1.c              |    2 +-
 cipher/sha256.c            |    2 +-
 cipher/sha512.c            |    2 +-
 cipher/tiger.c             |    4 +-
 random/random-csprng.c     |    4 +-
 src/secmem.c               |   12 ++--
 13 files changed, 96 insertions(+), 91 deletions(-)


hooks/post-receive
-- 
The GNU crypto library
http://git.gnupg.org

