[git] GCRYPT - branch, master, updated. post-nuke-of-trailing-ws-5-gcdedad7

From: Werner Koch <cvs at cvs.gnupg.org>
Date: Mon Feb 14 20:55:48 CET 2011


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "The GNU crypto library".

The branch, master has been updated
       via  cdedad711a77befcd018e14298ab94a478a822de (commit)
       via  5ede4ed784148422e3bd2a99ad0e87831f622aa9 (commit)
      from  a39539afdf39fe525ed7512aafb92733d2fe358c (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit cdedad711a77befcd018e14298ab94a478a822de
Author: Werner Koch <wk at gnupg.org>
Date:   Mon Feb 14 20:31:47 2011 +0100

    Use a better alignment.
    
    benchmark now supports the option
      --alignment 16
    to test the overhead of non-aligned buffers.
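
A note on the pattern used throughout the patch below: the per-call copies
are kept only as a fallback.  The following standalone sketch illustrates
the idea; process_block and core_fn are hypothetical names, not libgcrypt
identifiers, and the aligned attribute assumes gcc, as the patch itself
does.

    #include <stddef.h>
    #include <string.h>

    /* Illustrative sketch: run an aligned-only core function on possibly
       unaligned 16 byte blocks, copying through aligned stack buffers
       only when an address is not already on a 16 byte boundary.  */
    static void
    process_block (void (*core_fn)(unsigned char *, const unsigned char *),
                   unsigned char *dst, const unsigned char *src)
    {
      if (((size_t)src & 0x0f) || ((size_t)dst & 0x0f))
        {
          unsigned char in[16]  __attribute__ ((aligned (16)));
          unsigned char out[16] __attribute__ ((aligned (16)));

          memcpy (in, src, 16);
          core_fn (out, in);
          memcpy (dst, out, 16);
        }
      else
        core_fn (dst, src);   /* Fast path: no copies at all.  */
    }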

diff --git a/cipher/ChangeLog b/cipher/ChangeLog
index 9a59bde..787fe20 100644
--- a/cipher/ChangeLog
+++ b/cipher/ChangeLog
@@ -1,5 +1,10 @@
 2011-02-14  Werner Koch  <wk at g10code.com>
 
+	* rijndael.c (ATTR_ALIGNED_16): New.
+	(do_aesni): Do not copy if already aligned.
+	(do_encrypt, do_decrypt): Ditto.
+	(rijndael_decrypt, rijndael_encrypt): Increase stack burning amount.
+
 	* rijndael.c (RIJNDAEL_context): Reorder fields.  Change field name
 	ROUNDS to rounds.  Move padlock_key into u1.
 	(keySched, keySched2): Rename macros to keyschenc and keyschdec
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 56b55dc..43d7e67 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -72,6 +72,14 @@
 #define BLOCKSIZE               (128/8)
 
 
+/* Helper macro to force alignment to 16 bytes.  */
+#ifdef __GNUC__
+# define ATTR_ALIGNED_16  __attribute__ ((aligned (16)))
+#else
+# define ATTR_ALIGNED_16
+#endif
+
+
 /* USE_PADLOCK indicates whether to compile the padlock specific
    code.  */
 #undef USE_PADLOCK
@@ -510,22 +518,29 @@ static void
 do_encrypt (const RIJNDAEL_context *ctx,
             unsigned char *bx, const unsigned char *ax)
 {
-  /* BX and AX are not necessary correctly aligned.  Thus we need to
-     copy them here. */
-  union
-  {
-    u32  dummy[4];
-    byte a[16];
-  } a;
-  union
-  {
-    u32  dummy[4];
-    byte b[16];
-  } b;
+  /* BX and AX are not necessarily correctly aligned.  Thus we might
+     need to copy them here.  We try to align to 16 bytes.  */
+  if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
+    {
+      union
+      {
+        u32  dummy[4];
+        byte a[16] ATTR_ALIGNED_16;
+      } a;
+      union
+      {
+        u32  dummy[4];
+        byte b[16] ATTR_ALIGNED_16;
+      } b;
 
-  memcpy (a.a, ax, 16);
-  do_encrypt_aligned (ctx, b.b, a.a);
-  memcpy (bx, b.b, 16);
+      memcpy (a.a, ax, 16);
+      do_encrypt_aligned (ctx, b.b, a.a);
+      memcpy (bx, b.b, 16);
+    }
+  else
+    {
+      do_encrypt_aligned (ctx, bx, ax);
+    }
 }
 
 
@@ -652,24 +667,33 @@ static void
 do_aesni (RIJNDAEL_context *ctx, int decrypt_flag,
           unsigned char *bx, const unsigned char *ax)
 {
-  /* BX and AX are not necessary correctly aligned.  Thus we need to
-     copy them here. */
-  unsigned char a[16] __attribute__ ((aligned (16)));
-  unsigned char b[16] __attribute__ ((aligned (16)));
+  if (decrypt_flag && !ctx->decryption_prepared )
+    {
+      prepare_decryption ( ctx );
+      ctx->decryption_prepared = 1;
+    }
 
-  memcpy (a, ax, 16);
-  if (decrypt_flag)
+  /* BX and AX are not necessarily correctly aligned.  Thus we might
+     need to copy them here.  */
+  if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
     {
-      if ( !ctx->decryption_prepared )
-        {
-          prepare_decryption ( ctx );
-          ctx->decryption_prepared = 1;
-        }
-      do_aesni_dec_aligned (ctx, b, a);
+      unsigned char a[16] __attribute__ ((aligned (16)));
+      unsigned char b[16] __attribute__ ((aligned (16)));
+
+      memcpy (a, ax, 16);
+      if (decrypt_flag)
+        do_aesni_dec_aligned (ctx, b, a);
+      else
+        do_aesni_enc_aligned (ctx, b, a);
+      memcpy (bx, b, 16);
     }
   else
-    do_aesni_enc_aligned (ctx, b, a);
-  memcpy (bx, b, 16);
+    {
+      if (decrypt_flag)
+        do_aesni_dec_aligned (ctx, bx, ax);
+      else
+        do_aesni_enc_aligned (ctx, bx, ax);
+    }
 }
 #endif /*USE_AESNI*/
 
@@ -698,7 +722,7 @@ rijndael_encrypt (void *context, byte *b, const byte *a)
   else
     {
       do_encrypt (ctx, b, a);
-      _gcry_burn_stack (48 + 2*sizeof(int));
+      _gcry_burn_stack (56 + 2*sizeof(int));
     }
 }
 
@@ -903,19 +927,6 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
 static void
 do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
 {
-  /* BX and AX are not necessary correctly aligned.  Thus we need to
-     copy them here. */
-  union
-  {
-    u32  dummy[4];
-    byte a[16];
-  } a;
-  union
-  {
-    u32  dummy[4];
-    byte b[16];
-  } b;
-
   if ( !ctx->decryption_prepared )
     {
       prepare_decryption ( ctx );
@@ -923,10 +934,29 @@ do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
       ctx->decryption_prepared = 1;
     }
 
-  memcpy (a.a, ax, 16);
-  do_decrypt_aligned (ctx, b.b, a.a);
-  memcpy (bx, b.b, 16);
-#undef rk
+  /* BX and AX are not necessarily correctly aligned.  Thus we might
+     need to copy them here.  We try to align to 16 bytes. */
+  if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
+    {
+      union
+      {
+        u32  dummy[4];
+        byte a[16] ATTR_ALIGNED_16;
+      } a;
+      union
+      {
+        u32  dummy[4];
+        byte b[16] ATTR_ALIGNED_16;
+      } b;
+
+      memcpy (a.a, ax, 16);
+      do_decrypt_aligned (ctx, b.b, a.a);
+      memcpy (bx, b.b, 16);
+    }
+  else
+    {
+      do_decrypt_aligned (ctx, bx, ax);
+    }
 }
 
 
@@ -956,7 +986,7 @@ rijndael_decrypt (void *context, byte *b, const byte *a)
   else
     {
       do_decrypt (ctx, b, a);
-      _gcry_burn_stack (48+2*sizeof(int));
+      _gcry_burn_stack (56+2*sizeof(int));
     }
 }
 
diff --git a/tests/ChangeLog b/tests/ChangeLog
index ac79a28..9334521 100644
--- a/tests/ChangeLog
+++ b/tests/ChangeLog
@@ -1,3 +1,7 @@
+2011-02-14  Werner Koch  <wk at g10code.com>
+
+	* benchmark.c: Add option --alignment.
+
 2011-02-01  Werner Koch  <wk at g10code.com>
 
 	* curves.c: New.
diff --git a/tests/benchmark.c b/tests/benchmark.c
index 76dcd48..465f1b5 100644
--- a/tests/benchmark.c
+++ b/tests/benchmark.c
@@ -51,6 +51,9 @@ static int cipher_repetitions;
 /* Number of hash repetitions.  */
 static int hash_repetitions;
 
+/* Alignment of the buffers.  */
+static int buffer_alignment;
+
 /* Whether fips mode was active at startup.  */
 static int in_fips_mode;
 
@@ -502,6 +505,7 @@ cipher_bench ( const char *algoname )
   int keylen, blklen;
   char key[128];
   char *outbuf, *buf;
+  char *raw_outbuf, *raw_buf;
   size_t allocated_buflen, buflen;
   int repetitions;
   static struct { int mode; const char *name; int blocked; } modes[] = {
@@ -537,8 +541,16 @@ cipher_bench ( const char *algoname )
     }
   repetitions *= cipher_repetitions;
 
-  buf = gcry_xmalloc (allocated_buflen);
-  outbuf = gcry_xmalloc (allocated_buflen);
+  buf = raw_buf = gcry_xmalloc (allocated_buflen+15);
+  if (buffer_alignment)
+    while (((size_t)buf & 0x0f))
+      buf++;
+
+  outbuf = raw_outbuf = gcry_xmalloc (allocated_buflen+15);
+  if (buffer_alignment)
+    while (((size_t)outbuf & 0x0f))
+      outbuf++;
+
 
   if (!header_printed)
     {
@@ -667,8 +679,8 @@ cipher_bench ( const char *algoname )
     }
 
   putchar ('\n');
-  gcry_free (buf);
-  gcry_free (outbuf);
+  gcry_free (raw_buf);
+  gcry_free (raw_outbuf);
 }
 
 
@@ -1116,6 +1128,15 @@ main( int argc, char **argv )
               argc--; argv++;
             }
         }
+      else if (!strcmp (*argv, "--alignment"))
+        {
+          argc--; argv++;
+          if (argc)
+            {
+              buffer_alignment = atoi(*argv);
+              argc--; argv++;
+            }
+        }
       else if (!strcmp (*argv, "--fips"))
         {
           argc--; argv++;
@@ -1129,6 +1150,15 @@ main( int argc, char **argv )
         }
     }
 
+  switch (buffer_alignment)
+    {
+    case 0:
+    case 16:
+      break;
+    default:
+      die ("option --alignment requires a value of 0 or 16\n");
+    }
+
   gcry_control (GCRYCTL_SET_VERBOSITY, (int)verbose);
 
   if (!gcry_check_version (GCRYPT_VERSION))
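
For reference, the buffer handling added to tests/benchmark.c above
reduces to this standalone sketch (the names and the 4096 byte size are
illustrative only): over-allocate by 15 bytes, bump the working pointer
to the next 16 byte boundary, and keep the raw pointer for free().

    #include <stdlib.h>
    #include <stddef.h>

    int
    main (void)
    {
      size_t buflen = 4096;                /* Illustrative size.  */
      char *raw_buf = malloc (buflen + 15);
      char *buf = raw_buf;

      if (!raw_buf)
        return 1;
      while (((size_t)buf & 0x0f))         /* Same bump loop as above.  */
        buf++;

      /* ... use BUF for at most BUFLEN bytes ... */

      free (raw_buf);                      /* Free the original pointer.  */
      return 0;
    }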

commit 5ede4ed784148422e3bd2a99ad0e87831f622aa9
Author: Werner Koch <wk at gnupg.org>
Date:   Mon Feb 14 19:18:20 2011 +0100

    Simplify context alignment and align the IV.
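
A minimal sketch of the IV alignment idea in this commit; aligned16_t and
handle_sketch are illustrative stand-ins (libgcrypt uses its internal
cipher_context_alignment_t), and the aligned attribute assumes gcc.
Overlaying the IV with a 16 byte aligned dummy member forces the whole
union onto a 16 byte boundary; this only helps if the handle itself is
allocated with at least that alignment, which is what the
NEED_16BYTE_ALIGNED_CONTEXT change below is about.

    typedef struct { char dummy[16]; } __attribute__ ((aligned (16)))
            aligned16_t;

    struct handle_sketch
    {
      union {
        aligned16_t iv_align;        /* Forces 16 byte alignment.  */
        unsigned char iv[64];        /* Stand-in for MAX_BLOCKSIZE.  */
      } u_iv;
    };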

diff --git a/cipher/ChangeLog b/cipher/ChangeLog
index 670491d..9a59bde 100644
--- a/cipher/ChangeLog
+++ b/cipher/ChangeLog
@@ -1,3 +1,13 @@
+2011-02-14  Werner Koch  <wk at g10code.com>
+
+	* rijndael.c (RIJNDAEL_context): Reorder fields.  Change field name
+	ROUNDS to rounds.  Move padlock_key into u1.
+	(keySched, keySched2): Rename macros to keyschenc and keyschdec
+	and change all users.
+	(padlockkey): New macro.  Change all users of padlock_key.
+	* cipher.c (NEED_16BYTE_ALIGNED_CONTEXT): Always define if using gcc.
+	(struct gcry_cipher_handle): Align U_IV to at least 16 byte.
+
 2011-02-13  Werner Koch  <wk at g10code.com>
 
 	* rijndael.c (USE_AESNI): New.  Define for ia32 and gcc >= 4.
diff --git a/cipher/cipher.c b/cipher/cipher.c
index 9e5bca5..92b3698 100644
--- a/cipher/cipher.c
+++ b/cipher/cipher.c
@@ -33,9 +33,12 @@
 #define CTX_MAGIC_NORMAL 0x24091964
 #define CTX_MAGIC_SECURE 0x46919042
 
+/* Try to use a 16 byte aligned cipher context for better performance.
+   We use the aligned attribute, thus it is only possible to implement
+   this with gcc.  */
 #undef NEED_16BYTE_ALIGNED_CONTEXT
-#if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && defined (__GNUC__)
-#define NEED_16BYTE_ALIGNED_CONTEXT 1
+#if defined (__GNUC__)
+# define NEED_16BYTE_ALIGNED_CONTEXT 1
 #endif
 
 /* A dummy extraspec so that we do not need to test the extraspec
@@ -198,11 +201,11 @@ struct gcry_cipher_handle
     unsigned int iv:1;  /* Set to 1 if an IV has been set.  */
   } marks;
 
-  /* The initialization vector.  To help code optimization we make
-     sure that it is aligned on an unsigned long and u32 boundary.  */
+  /* The initialization vector.  For best performance we make sure
+     that it is properly aligned.  In particular some implementations
+     of bulk operations expect a 16 byte aligned IV.  */
   union {
-    unsigned long dummy_iv;
-    u32 dummy_u32_iv;
+    cipher_context_alignment_t iv_align;
     unsigned char iv[MAX_BLOCKSIZE];
   } u_iv;
 
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index bebe163..56b55dc 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -97,32 +97,46 @@
 
 static const char *selftest(void);
 
+
+/* Our context object.  */
 typedef struct
 {
-  int   ROUNDS;             /* Key-length-dependent number of rounds.  */
-  int decryption_prepared;  /* The decryption key schedule is available.  */
-#ifdef USE_PADLOCK
-  int use_padlock;          /* Padlock shall be used.  */
-  /* The key as passed to the padlock engine.  */
-  unsigned char padlock_key[16] __attribute__ ((aligned (16)));
-#endif /*USE_PADLOCK*/
-#ifdef USE_AESNI
-  int use_aesni;           /* AES-NI shall be used.  */
-#endif /*USE_AESNI*/
+  /* The first fields are the keyschedule arrays.  This is so that
+     they are aligned on a 16 byte boundary if using gcc.  This
+     alignment is required for the AES-NI code and a good idea in any
+     case.  The alignment is guaranteed due to the way cipher.c
+     allocates the space for the context.  The PROPERLY_ALIGNED_TYPE
+     hack is used to force a minimal alignment if not using gcc or if
+     the alignment requirement is higher than 16 bytes.  */
   union
   {
     PROPERLY_ALIGNED_TYPE dummy;
     byte keyschedule[MAXROUNDS+1][4][4];
+#ifdef USE_PADLOCK
+    /* The key as passed to the padlock engine.  It is only used if
+       the padlock engine is used (USE_PADLOCK, below).  */
+    unsigned char padlock_key[16] __attribute__ ((aligned (16)));
+#endif /*USE_PADLOCK*/
   } u1;
   union
   {
     PROPERLY_ALIGNED_TYPE dummy;
     byte keyschedule[MAXROUNDS+1][4][4];
   } u2;
+  int rounds;               /* Key-length-dependent number of rounds.  */
+  int decryption_prepared;  /* The decryption key schedule is available.  */
+#ifdef USE_PADLOCK
+  int use_padlock;          /* Padlock shall be used.  */
+#endif /*USE_PADLOCK*/
+#ifdef USE_AESNI
+  int use_aesni;            /* AES-NI shall be used.  */
+#endif /*USE_AESNI*/
 } RIJNDAEL_context;
 
-#define keySched  u1.keyschedule
-#define keySched2 u2.keyschedule
+/* Macros defining aliases for the key schedules.  */
+#define keyschenc  u1.keyschedule
+#define keyschdec  u2.keyschedule
+#define padlockkey u1.padlock_key
 
 /* All the numbers.  */
 #include "rijndael-tables.h"
@@ -134,7 +148,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
 {
   static int initialized = 0;
   static const char *selftest_failed=0;
-  int ROUNDS;
+  int rounds;
   int i,j, r, t, rconpointer = 0;
   int KC;
   union
@@ -177,7 +191,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
 
   if( keylen == 128/8 )
     {
-      ROUNDS = 10;
+      rounds = 10;
       KC = 4;
 
       if (0)
@@ -186,7 +200,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
       else if ((_gcry_get_hw_features () & HWF_PADLOCK_AES))
         {
           ctx->use_padlock = 1;
-          memcpy (ctx->padlock_key, key, keylen);
+          memcpy (ctx->padlockkey, key, keylen);
         }
 #endif
 #ifdef USE_AESNI
@@ -198,7 +212,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
     }
   else if ( keylen == 192/8 )
     {
-      ROUNDS = 12;
+      rounds = 12;
       KC = 6;
 
       if (0)
@@ -212,7 +226,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
     }
   else if ( keylen == 256/8 )
     {
-      ROUNDS = 14;
+      rounds = 14;
       KC = 8;
 
       if (0)
@@ -227,7 +241,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
   else
     return GPG_ERR_INV_KEYLEN;
 
-  ctx->ROUNDS = ROUNDS;
+  ctx->rounds = rounds;
 
 #ifdef USE_PADLOCK
   if (ctx->use_padlock)
@@ -238,7 +252,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
   else
 #endif /*USE_PADLOCK*/
     {
-#define W (ctx->keySched)
+#define W (ctx->keyschenc)
       for (i = 0; i < keylen; i++)
         {
           k[i >> 2][i & 3] = key[i];
@@ -251,7 +265,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
       r = 0;
       t = 0;
       /* Copy values into round key array.  */
-      for (j = 0; (j < KC) && (r < ROUNDS + 1); )
+      for (j = 0; (j < KC) && (r < rounds + 1); )
         {
           for (; (j < KC) && (t < 4); j++, t++)
             {
@@ -264,7 +278,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
             }
         }
 
-      while (r < ROUNDS + 1)
+      while (r < rounds + 1)
         {
           /* While not enough round key material calculated calculate
              new values.  */
@@ -298,7 +312,7 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
             }
 
           /* Copy values into round key array.  */
-          for (j = 0; (j < KC) && (r < ROUNDS + 1); )
+          for (j = 0; (j < KC) && (r < rounds + 1); )
             {
               for (; (j < KC) && (t < 4); j++, t++)
                 {
@@ -343,12 +357,12 @@ prepare_decryption( RIJNDAEL_context *ctx )
       /* The AES-NI decrypt instructions use the Equivalent Inverse
           Cipher, thus we can't use the standard decrypt key
          preparation.  */
-        m128i_t *ekey = (m128i_t*)ctx->keySched;
-        m128i_t *dkey = (m128i_t*)ctx->keySched2;
+        m128i_t *ekey = (m128i_t*)ctx->keyschenc;
+        m128i_t *dkey = (m128i_t*)ctx->keyschdec;
         int rr;
 
-        dkey[0] = ekey[ctx->ROUNDS];
-        for (r=1, rr=ctx->ROUNDS-1; r < ctx->ROUNDS; r++, rr--)
+        dkey[0] = ekey[ctx->rounds];
+        for (r=1, rr=ctx->rounds-1; r < ctx->rounds; r++, rr--)
           {
             asm volatile
               ("movdqu %[ekey], %%xmm1\n\t"
@@ -372,13 +386,13 @@ prepare_decryption( RIJNDAEL_context *ctx )
 
       for (r=0; r < MAXROUNDS+1; r++ )
         {
-          *((u32*)ctx->keySched2[r][0]) = *((u32*)ctx->keySched[r][0]);
-          *((u32*)ctx->keySched2[r][1]) = *((u32*)ctx->keySched[r][1]);
-          *((u32*)ctx->keySched2[r][2]) = *((u32*)ctx->keySched[r][2]);
-          *((u32*)ctx->keySched2[r][3]) = *((u32*)ctx->keySched[r][3]);
+          *((u32*)ctx->keyschdec[r][0]) = *((u32*)ctx->keyschenc[r][0]);
+          *((u32*)ctx->keyschdec[r][1]) = *((u32*)ctx->keyschenc[r][1]);
+          *((u32*)ctx->keyschdec[r][2]) = *((u32*)ctx->keyschenc[r][2]);
+          *((u32*)ctx->keyschdec[r][3]) = *((u32*)ctx->keyschenc[r][3]);
         }
-#define W (ctx->keySched2)
-      for (r = 1; r < ctx->ROUNDS; r++)
+#define W (ctx->keyschdec)
+      for (r = 1; r < ctx->rounds; r++)
         {
           w = W[r][0];
           *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
@@ -408,8 +422,8 @@ static void
 do_encrypt_aligned (const RIJNDAEL_context *ctx,
                     unsigned char *b, const unsigned char *a)
 {
-#define rk (ctx->keySched)
-  int ROUNDS = ctx->ROUNDS;
+#define rk (ctx->keyschenc)
+  int rounds = ctx->rounds;
   int r;
   union
   {
@@ -438,7 +452,7 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
                         ^ *((u32*)T3[u.temp[1][2]])
                         ^ *((u32*)T4[u.temp[2][3]]));
 
-  for (r = 1; r < ROUNDS-1; r++)
+  for (r = 1; r < rounds-1; r++)
     {
       *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
       *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
@@ -464,10 +478,10 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
     }
 
   /* Last round is special. */
-  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[ROUNDS-1][0]);
-  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[ROUNDS-1][1]);
-  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[ROUNDS-1][2]);
-  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[ROUNDS-1][3]);
+  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[rounds-1][0]);
+  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[rounds-1][1]);
+  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[rounds-1][2]);
+  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[rounds-1][3]);
   b[ 0] = T1[u.temp[0][0]][1];
   b[ 1] = T1[u.temp[1][1]][1];
   b[ 2] = T1[u.temp[2][2]][1];
@@ -484,10 +498,10 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
   b[13] = T1[u.temp[0][1]][1];
   b[14] = T1[u.temp[1][2]][1];
   b[15] = T1[u.temp[2][3]][1];
-  *((u32*)(b   )) ^= *((u32*)rk[ROUNDS][0]);
-  *((u32*)(b+ 4)) ^= *((u32*)rk[ROUNDS][1]);
-  *((u32*)(b+ 8)) ^= *((u32*)rk[ROUNDS][2]);
-  *((u32*)(b+12)) ^= *((u32*)rk[ROUNDS][3]);
+  *((u32*)(b   )) ^= *((u32*)rk[rounds][0]);
+  *((u32*)(b+ 4)) ^= *((u32*)rk[rounds][1]);
+  *((u32*)(b+ 8)) ^= *((u32*)rk[rounds][2]);
+  *((u32*)(b+12)) ^= *((u32*)rk[rounds][3]);
 #undef rk
 }
 
@@ -531,7 +545,7 @@ do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
   /* The control word fields are:
       127:12   11:10 9     8     7     6     5     4     3:0
       RESERVED KSIZE CRYPT INTER KEYGN CIPHR ALIGN DGEST ROUND  */
-  cword[0] = (ctx->ROUNDS & 15);  /* (The mask is just a safeguard.)  */
+  cword[0] = (ctx->rounds & 15);  /* (The mask is just a safeguard.)  */
   cword[1] = 0;
   cword[2] = 0;
   cword[3] = 0;
@@ -548,7 +562,7 @@ do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
      ".byte 0xf3, 0x0f, 0xa7, 0xc8\n\t" /* REP XSTORE ECB. */
      "xchg %3, %%ebx\n"    /* Restore GOT register.  */
      : /* No output */
-     : "S" (a), "D" (b), "d" (cword), "r" (ctx->padlock_key)
+     : "S" (a), "D" (b), "d" (cword), "r" (ctx->padlockkey)
      : "%ecx", "cc", "memory"
      );
 
@@ -579,7 +593,7 @@ do_aesni_enc_aligned (const RIJNDAEL_context *ctx,
   int r;
   m128i_t *key;
 
-  key = (m128i_t*)ctx->keySched;
+  key = (m128i_t*)ctx->keyschenc;
 
   asm volatile ("movdqu %[src], %%xmm0\n\t" /* xmm0 := *a     */
                 "movdqu %[key], %%xmm1\n\t"
@@ -587,7 +601,7 @@ do_aesni_enc_aligned (const RIJNDAEL_context *ctx,
                 : : [src] "m" (*a), [key] "m" (*key));
 
   key++;
-  for (r = 1; r < ctx->ROUNDS; r++)
+  for (r = 1; r < ctx->rounds; r++)
     {
       asm volatile ("movdqu %[key], %%xmm1\n\t"
                     /*"aesenc %%xmm1, %%xmm0"*/
@@ -611,14 +625,14 @@ do_aesni_dec_aligned (const RIJNDAEL_context *ctx,
   int r;
   m128i_t *key;
 
-  key = (m128i_t*)ctx->keySched2;
+  key = (m128i_t*)ctx->keyschdec;
 
   asm volatile ("movdqu %[src], %%xmm0\n\t" /* xmm0 := *a     */
                 "movdqu %[key], %%xmm1\n\t"
                 "pxor   %%xmm1, %%xmm0"     /* xmm0 ^= key[0] */
                 : : [src] "m" (*a), [key] "m" (key[0]));
 
-  for (r = 1; r < ctx->ROUNDS; r++)
+  for (r = 1; r < ctx->rounds; r++)
     {
       asm volatile ("movdqu %[key], %%xmm1\n\t"
                     /*"aesdec %%xmm1, %%xmm0"*/
@@ -800,8 +814,8 @@ static void
 do_decrypt_aligned (RIJNDAEL_context *ctx,
                     unsigned char *b, const unsigned char *a)
 {
-#define rk  (ctx->keySched2)
-  int ROUNDS = ctx->ROUNDS;
+#define rk  (ctx->keyschdec)
+  int rounds = ctx->rounds;
   int r;
   union
   {
@@ -810,10 +824,10 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
   } u;
 
 
-  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[ROUNDS][0]);
-  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[ROUNDS][1]);
-  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[ROUNDS][2]);
-  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[ROUNDS][3]);
+  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[rounds][0]);
+  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[rounds][1]);
+  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[rounds][2]);
+  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[rounds][3]);
 
   *((u32*)(b   ))    = (*((u32*)T5[u.temp[0][0]])
                         ^ *((u32*)T6[u.temp[3][1]])
@@ -832,7 +846,7 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
                         ^ *((u32*)T7[u.temp[1][2]])
                         ^ *((u32*)T8[u.temp[0][3]]));
 
-  for (r = ROUNDS-1; r > 1; r--)
+  for (r = rounds-1; r > 1; r--)
     {
       *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
       *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
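
To make the layout guarantee of the reordered RIJNDAEL_context concrete,
here is a hedged, self-contained sketch (ctx_sketch and its members are
illustrative, not the real structure): the key schedule unions come first
and each spans a multiple of 16 bytes, so a 16 byte aligned context yields
16 byte aligned schedules without per-field attributes.

    #include <assert.h>
    #include <stdint.h>

    typedef struct
    {
      union { long double d; unsigned char ks[15][4][4]; } u1;  /* 240 bytes.  */
      union { long double d; unsigned char ks[15][4][4]; } u2;
      int rounds;
    } ctx_sketch;

    int
    main (void)
    {
      ctx_sketch ctx __attribute__ ((aligned (16)));

      assert (((uintptr_t)&ctx.u1 & 0x0f) == 0);  /* First field inherits.  */
      assert (((uintptr_t)&ctx.u2 & 0x0f) == 0);  /* 240 is a multiple of 16.  */
      return 0;
    }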

-----------------------------------------------------------------------

Summary of changes:
 cipher/ChangeLog  |   15 +++
 cipher/cipher.c   |   15 ++--
 cipher/rijndael.c |  252 +++++++++++++++++++++++++++++++----------------------
 tests/ChangeLog   |    4 +
 tests/benchmark.c |   38 +++++++-
 5 files changed, 210 insertions(+), 114 deletions(-)


hooks/post-receive
-- 
The GNU crypto library
http://git.gnupg.org



