[git] GCRYPT - branch, master, updated. libgcrypt-1.5.0-13-gba71277

by Werner Koch cvs at cvs.gnupg.org
Thu Sep 15 19:59:46 CEST 2011


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "The GNU crypto library".

The branch, master has been updated
       via  ba7127721035ee0937167e16465cc9f40b39257d (commit)
       via  ad792d462d001858af89485380bf8f8a4a8dcd5d (commit)
      from  889a25ed3333d1d7657b4b59ae21f6e8458f9027 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit ba7127721035ee0937167e16465cc9f40b39257d
Merge: 889a25e ad792d4
Author: Werner Koch <wk at gnupg.org>
Date:   Thu Sep 15 18:55:28 2011 +0200

    Factor cipher mode code out to separate files.
    
    Fixed Changelog and Makefile.
    Added missing cipher-aeswrap.c file.

diff --cc cipher/ChangeLog
index 0bbbbb4,f5237dd..cc59935
--- a/cipher/ChangeLog
+++ b/cipher/ChangeLog
@@@ -1,13 -1,17 +1,27 @@@
 -2011-08-03  Werner Koch  <wk at g10code.com>
 +2011-09-15  Werner Koch  <wk at g10code.com>
  
+ 	* cipher-cbc.c, cipher-cfb.c, cipher-ofb.c, cipher-ctr.c: New.
+ 	* cipher-aeswrap.c: New.
+ 	* cipher-internal.h: New.
+ 	* cipher.c (cipher_context_alignment_t, struct gcry_cipher_handle)
+ 	(CTX_MAGIC_NORMAL, CTX_MAGIC_SECURE, NEED_16BYTE_ALIGNED_CONTEXT)
+ 	(MAX_BLOCKSIZE): Move to cipher-internal.h.
+ 	(do_aeswrap_encrypt, do_aeswrap_decrypt)
+ 	(do_cbc_encrypt, do_cbc_decrypt, do_cfb_encrypt, do_cfb_decrypt)
+ 	(do_ofb_encrypt, do_ofb_decrypt, do_ctr_encrypt): Move to the
+ 	respective new cipher-foo.c files.
+ 	(do_ctr_decrypt): Remove.
+ 
++2011-09-15  Werner Koch  <wk at g10code.com>
++
 +	* pubkey.c (gcry_pk_list): Remove.
 +	(gcry_pk_unregister): Remove.
 +	* md.c (gcry_md_list): Remove.
 +	(gcry_md_unregister): Remove.
 +	* cipher.c (gcry_cipher_list): Remove.
 +	(gcry_cipher_unregister): Remove.
 +	* ac.c: Remove.
 +
  2011-06-29  Werner Koch  <wk at g10code.com>
  
  	* cipher.c (cipher_get_keylen): Return zero for an invalid algorithm.
diff --cc cipher/Makefile.am
index eb2ce28,6270fd3..dcb4a47
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@@ -35,7 -35,9 +35,9 @@@ libcipher_la_DEPENDENCIES = $(GCRYPT_MO
  libcipher_la_LIBADD = $(GCRYPT_MODULES)
  
  libcipher_la_SOURCES = \
- cipher.c pubkey.c md.c kdf.c \
+ cipher.c cipher-internal.h \
+ cipher-cbc.c cipher-cfb.c cipher-ofb.c cipher-ctr.c cipher-aeswrap.c \
 -pubkey.c ac.c md.c kdf.c \
++pubkey.c md.c kdf.c \
  hmac-tests.c \
  bithelp.h  \
  primegen.c  \
diff --cc cipher/cipher-aeswrap.c
index 0000000,0000000..b559e7f
new file mode 100644
--- /dev/null
+++ b/cipher/cipher-aeswrap.c
@@@ -1,0 -1,0 +1,196 @@@
++/* cipher-aeswrap.c  - Generic AESWRAP mode implementation
++ * Copyright (C) 2009, 2011 Free Software Foundation, Inc.
++ *
++ * This file is part of Libgcrypt.
++ *
++ * Libgcrypt is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser general Public License as
++ * published by the Free Software Foundation; either version 2.1 of
++ * the License, or (at your option) any later version.
++ *
++ * Libgcrypt is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <config.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <errno.h>
++
++#include "g10lib.h"
++#include "cipher.h"
++#include "ath.h"
++#include "./cipher-internal.h"
++
++
++/* Perform the AES-Wrap algorithm as specified by RFC3394.  We
++   implement this as a mode usable with any cipher algorithm of
++   blocksize 128.  */
++gcry_err_code_t
++_gcry_cipher_aeswrap_encrypt (gcry_cipher_hd_t c,
++                              byte *outbuf, unsigned int outbuflen,
++                              const byte *inbuf, unsigned int inbuflen )
++{
++  int j, x;
++  unsigned int n, i;
++  unsigned char *r, *a, *b;
++  unsigned char t[8];
++
++#if MAX_BLOCKSIZE < 8
++#error Invalid block size
++#endif
++  /* We require a cipher with a 128 bit block length.  */
++  if (c->cipher->blocksize != 16)
++    return GPG_ERR_INV_LENGTH;
++
++  /* The output buffer must be able to hold the input data plus one
++     additional block.  */
++  if (outbuflen < inbuflen + 8)
++    return GPG_ERR_BUFFER_TOO_SHORT;
++  /* Input data must be multiple of 64 bits.  */
++  if (inbuflen % 8)
++    return GPG_ERR_INV_ARG;
++
++  n = inbuflen / 8;
++
++  /* We need at least two 64 bit blocks.  */
++  if (n < 2)
++    return GPG_ERR_INV_ARG;
++
++  r = outbuf;
++  a = outbuf;  /* We store A directly in OUTBUF.  */
++  b = c->u_ctr.ctr;  /* B is also used to concatenate stuff.  */
++
++  /* If an IV has been set we use that IV as the Alternative Initial
++     Value; if it has not been set we use the standard value.  */
++  if (c->marks.iv)
++    memcpy (a, c->u_iv.iv, 8);
++  else
++    memset (a, 0xa6, 8);
++
++  /* Copy the inbuf to the outbuf. */
++  memmove (r+8, inbuf, inbuflen);
++
++  memset (t, 0, sizeof t); /* t := 0.  */
++
++  for (j = 0; j <= 5; j++)
++    {
++      for (i = 1; i <= n; i++)
++        {
++          /* B := AES_k( A | R[i] ) */
++          memcpy (b, a, 8);
++          memcpy (b+8, r+i*8, 8);
++          c->cipher->encrypt (&c->context.c, b, b);
++          /* t := t + 1  */
++	  for (x = 7; x >= 0; x--)
++	    {
++	      t[x]++;
++	      if (t[x])
++		break;
++	    }
++          /* A := MSB_64(B) ^ t */
++          for (x=0; x < 8; x++)
++            a[x] = b[x] ^ t[x];
++          /* R[i] := LSB_64(B) */
++          memcpy (r+i*8, b+8, 8);
++        }
++   }
++
++  return 0;
++}
++
++/* Perform the AES-Unwrap algorithm as specified by RFC3394.  We
++   implement this as a mode usable with any cipher algorithm of
++   blocksize 128.  */
++gcry_err_code_t
++_gcry_cipher_aeswrap_decrypt (gcry_cipher_hd_t c,
++                              byte *outbuf, unsigned int outbuflen,
++                              const byte *inbuf, unsigned int inbuflen)
++{
++  int j, x;
++  unsigned int n, i;
++  unsigned char *r, *a, *b;
++  unsigned char t[8];
++
++#if MAX_BLOCKSIZE < 8
++#error Invalid block size
++#endif
++  /* We require a cipher with a 128 bit block length.  */
++  if (c->cipher->blocksize != 16)
++    return GPG_ERR_INV_LENGTH;
++
++  /* The output buffer must be able to hold the input data minus one
++     additional block.  Fixme: The caller has more restrictive checks
++     - we may want to fix them for this mode.  */
++  if (outbuflen + 8  < inbuflen)
++    return GPG_ERR_BUFFER_TOO_SHORT;
++  /* Input data must be multiple of 64 bits.  */
++  if (inbuflen % 8)
++    return GPG_ERR_INV_ARG;
++
++  n = inbuflen / 8;
++
++  /* We need at least three 64 bit blocks.  */
++  if (n < 3)
++    return GPG_ERR_INV_ARG;
++
++  r = outbuf;
++  a = c->lastiv;  /* We use c->LASTIV as buffer for A.  */
++  b = c->u_ctr.ctr;     /* B is also used to concatenate stuff.  */
++
++  /* Copy the inbuf to the outbuf and save A. */
++  memcpy (a, inbuf, 8);
++  memmove (r, inbuf+8, inbuflen-8);
++  n--; /* Reduce to actual number of data blocks.  */
++
++  /* t := 6 * n  */
++  i = n * 6;  /* The range is valid because: n = inbuflen / 8 - 1.  */
++  for (x=0; x < 8 && x < sizeof (i); x++)
++    t[7-x] = i >> (8*x);
++  for (; x < 8; x++)
++    t[7-x] = 0;
++
++  for (j = 5; j >= 0; j--)
++    {
++      for (i = n; i >= 1; i--)
++        {
++          /* B := AES_k^-1( (A ^ t) | R[i] ) */
++          for (x = 0; x < 8; x++)
++            b[x] = a[x] ^ t[x];
++          memcpy (b+8, r+(i-1)*8, 8);
++          c->cipher->decrypt (&c->context.c, b, b);
++          /* t := t - 1  */
++	  for (x = 7; x >= 0; x--)
++	    {
++	      t[x]--;
++	      if (t[x] != 0xff)
++		break;
++	    }
++          /* A := MSB_64(B) */
++          memcpy (a, b, 8);
++          /* R[i] := LSB_64(B) */
++          memcpy (r+(i-1)*8, b+8, 8);
++        }
++   }
++
++  /* If an IV has been set we compare against this Alternative Initial
++     Value; if it has not been set we compare against the standard IV.  */
++  if (c->marks.iv)
++    j = memcmp (a, c->u_iv.iv, 8);
++  else
++    {
++      for (j=0, x=0; x < 8; x++)
++        if (a[x] != 0xa6)
++          {
++            j=1;
++            break;
++          }
++    }
++  return j? GPG_ERR_CHECKSUM : 0;
++}
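
The new AESWRAP routines are reached through the unchanged public API by
opening a handle with GCRY_CIPHER_MODE_AESWRAP.  A minimal caller sketch
(assuming the libgcrypt 1.5 public API; key material and error handling
are illustrative only):

    #include <stdio.h>
    #include <gcrypt.h>

    int
    main (void)
    {
      gcry_cipher_hd_t hd;
      unsigned char kek[16] = { 0 };      /* all-zero demo KEK */
      unsigned char keydata[16] = { 1 };  /* data to wrap; multiple of 8 bytes */
      unsigned char wrapped[16 + 8];      /* output needs one extra 64 bit block */
      gcry_error_t err;

      gcry_check_version (NULL);
      err = gcry_cipher_open (&hd, GCRY_CIPHER_AES128,
                              GCRY_CIPHER_MODE_AESWRAP, 0);
      if (err)
        return 1;
      err = gcry_cipher_setkey (hd, kek, sizeof kek);
      if (!err)  /* Without a prior setiv the default IV 0xA6A6... is used.  */
        err = gcry_cipher_encrypt (hd, wrapped, sizeof wrapped,
                                   keydata, sizeof keydata);
      if (err)
        fprintf (stderr, "aeswrap: %s\n", gcry_strerror (err));
      gcry_cipher_close (hd);
      return !!err;
    }

Unwrapping uses gcry_cipher_decrypt on such a handle; a corrupted block
shows up as the GPG_ERR_CHECKSUM result returned at the end of
_gcry_cipher_aeswrap_decrypt above.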

commit ad792d462d001858af89485380bf8f8a4a8dcd5d
Author: Werner Koch <wk at gnupg.org>
Date:   Wed Aug 3 21:34:39 2011 +0200

    Factor cipher mode code out to separate files.
    
    This is a preparation for adding more modes which are more complicated
    and thus ask for separate files.  For uniformity we do this for all
    modes except ECB.  It also has the advantage that it makes CPU specific
    variants of the code easier to implement (e.g. the XOR operations).
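
As an illustration of the CPU specific XOR variants the log message
refers to (a sketch only, not code from this commit): the byte-wise XOR
loops in the generic mode code can be replaced by word-wise XOR.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* XOR LEN bytes of SRC into DST, processing 32 bit words where
       possible.  memcpy is used for the word accesses so the sketch
       stays portable; a real CPU specific variant would use native
       loads and stores.  */
    static void
    buf_xor (unsigned char *dst, const unsigned char *src, size_t len)
    {
      uint32_t d, s;

      for (; len >= 4; len -= 4, dst += 4, src += 4)
        {
          memcpy (&d, dst, 4);
          memcpy (&s, src, 4);
          d ^= s;
          memcpy (dst, &d, 4);
        }
      for (; len; len--)
        *dst++ ^= *src++;
    }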

diff --git a/cipher/ChangeLog b/cipher/ChangeLog
index f061d01..f5237dd 100644
--- a/cipher/ChangeLog
+++ b/cipher/ChangeLog
@@ -1,3 +1,17 @@
+2011-08-03  Werner Koch  <wk at g10code.com>
+
+	* cipher-cbc.c, cipher-cfb.c, cipher-ofb.c, cipher-ctr.c: New.
+	* cipher-aeswrap.c: New.
+	* cipher-internal.h: New.
+	* cipher.c (cipher_context_alignment_t, struct gcry_cipher_handle)
+	(CTX_MAGIC_NORMAL, CTX_MAGIC_SECURE, NEED_16BYTE_ALIGNED_CONTEXT)
+	(MAX_BLOCKSIZE): Move to cipher-internal.h.
+	(do_aeswrap_encrypt, do_aeswrap_decrypt)
+	(do_cbc_encrypt, do_cbc_decrypt, do_cfb_encrypt, do_cfb_decrypt)
+	(do_ofb_encrypt, do_ofb_decrypt, do_ctr_encrypt): Move to the
+	respective new cipher-foo.c files.
+	(do_ctr_decrypt): Remove.
+
 2011-06-29  Werner Koch  <wk at g10code.com>
 
 	* cipher.c (cipher_get_keylen): Return zero for an invalid algorithm.
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index cbeace8..6270fd3 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -35,7 +35,9 @@ libcipher_la_DEPENDENCIES = $(GCRYPT_MODULES)
 libcipher_la_LIBADD = $(GCRYPT_MODULES)
 
 libcipher_la_SOURCES = \
-cipher.c pubkey.c ac.c md.c kdf.c \
+cipher.c cipher-internal.h \
+cipher-cbc.c cipher-cfb.c cipher-ofb.c cipher-ctr.c cipher-aeswrap.c \
+pubkey.c ac.c md.c kdf.c \
 hmac-tests.c \
 bithelp.h  \
 primegen.c  \
diff --git a/cipher/cipher-cbc.c b/cipher/cipher-cbc.c
new file mode 100644
index 0000000..b852589
--- /dev/null
+++ b/cipher/cipher-cbc.c
@@ -0,0 +1,187 @@
+/* cipher-cbc.c  - Generic CBC mode implementation
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
+ *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser general Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "g10lib.h"
+#include "cipher.h"
+#include "ath.h"
+#include "./cipher-internal.h"
+
+
+
+gcry_err_code_t
+_gcry_cipher_cbc_encrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned int n;
+  unsigned char *ivp;
+  int i;
+  size_t blocksize = c->cipher->blocksize;
+  unsigned nblocks = inbuflen / blocksize;
+
+  if (outbuflen < ((c->flags & GCRY_CIPHER_CBC_MAC)? blocksize : inbuflen))
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if ((inbuflen % c->cipher->blocksize)
+      && !(inbuflen > c->cipher->blocksize
+           && (c->flags & GCRY_CIPHER_CBC_CTS)))
+    return GPG_ERR_INV_LENGTH;
+
+  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
+    {
+      if ((inbuflen % blocksize) == 0)
+	nblocks--;
+    }
+
+  if (c->bulk.cbc_enc)
+    {
+      c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks,
+                       (c->flags & GCRY_CIPHER_CBC_MAC));
+      inbuf  += nblocks * blocksize;
+      if (!(c->flags & GCRY_CIPHER_CBC_MAC))
+        outbuf += nblocks * blocksize;
+    }
+  else
+    {
+      for (n=0; n < nblocks; n++ )
+        {
+          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+            outbuf[i] = inbuf[i] ^ *ivp++;
+          c->cipher->encrypt ( &c->context.c, outbuf, outbuf );
+          memcpy (c->u_iv.iv, outbuf, blocksize );
+          inbuf  += blocksize;
+          if (!(c->flags & GCRY_CIPHER_CBC_MAC))
+            outbuf += blocksize;
+        }
+    }
+
+  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
+    {
+      /* We have to be careful here, since outbuf might be equal to
+         inbuf.  */
+      int restbytes;
+      unsigned char b;
+
+      if ((inbuflen % blocksize) == 0)
+        restbytes = blocksize;
+      else
+        restbytes = inbuflen % blocksize;
+
+      outbuf -= blocksize;
+      for (ivp = c->u_iv.iv, i = 0; i < restbytes; i++)
+        {
+          b = inbuf[i];
+          outbuf[blocksize + i] = outbuf[i];
+          outbuf[i] = b ^ *ivp++;
+        }
+      for (; i < blocksize; i++)
+        outbuf[i] = 0 ^ *ivp++;
+
+      c->cipher->encrypt (&c->context.c, outbuf, outbuf);
+      memcpy (c->u_iv.iv, outbuf, blocksize);
+    }
+
+  return 0;
+}
+
+
+gcry_err_code_t
+_gcry_cipher_cbc_decrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned int n;
+  unsigned char *ivp;
+  int i;
+  size_t blocksize = c->cipher->blocksize;
+  unsigned int nblocks = inbuflen / blocksize;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if ((inbuflen % c->cipher->blocksize)
+      && !(inbuflen > c->cipher->blocksize
+           && (c->flags & GCRY_CIPHER_CBC_CTS)))
+    return GPG_ERR_INV_LENGTH;
+
+  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
+    {
+      nblocks--;
+      if ((inbuflen % blocksize) == 0)
+	nblocks--;
+      memcpy (c->lastiv, c->u_iv.iv, blocksize);
+    }
+
+  if (c->bulk.cbc_dec)
+    {
+      c->bulk.cbc_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
+      inbuf  += nblocks * blocksize;
+      outbuf += nblocks * blocksize;
+    }
+  else
+    {
+      for (n=0; n < nblocks; n++ )
+        {
+          /* Because outbuf and inbuf might be the same, we have to
+           * save the original ciphertext block.  We use LASTIV for
+           * this here because it is not used otherwise. */
+          memcpy (c->lastiv, inbuf, blocksize);
+          c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
+          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+	    outbuf[i] ^= *ivp++;
+          memcpy(c->u_iv.iv, c->lastiv, blocksize );
+          inbuf  += c->cipher->blocksize;
+          outbuf += c->cipher->blocksize;
+        }
+    }
+
+  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
+    {
+      int restbytes;
+
+      if ((inbuflen % blocksize) == 0)
+        restbytes = blocksize;
+      else
+        restbytes = inbuflen % blocksize;
+
+      memcpy (c->lastiv, c->u_iv.iv, blocksize );         /* Save Cn-2. */
+      memcpy (c->u_iv.iv, inbuf + blocksize, restbytes ); /* Save Cn. */
+
+      c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
+      for (ivp=c->u_iv.iv,i=0; i < restbytes; i++ )
+        outbuf[i] ^= *ivp++;
+
+      memcpy(outbuf + blocksize, outbuf, restbytes);
+      for(i=restbytes; i < blocksize; i++)
+        c->u_iv.iv[i] = outbuf[i];
+      c->cipher->decrypt (&c->context.c, outbuf, c->u_iv.iv);
+      for(ivp=c->lastiv,i=0; i < blocksize; i++ )
+        outbuf[i] ^= *ivp++;
+      /* c->lastiv is now really lastlastiv, does this matter? */
+    }
+
+  return 0;
+}
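
Stripped of the bulk path and the CBC-MAC/CTS special cases, the core of
_gcry_cipher_cbc_encrypt above is the textbook recurrence
C[i] = E_k(P[i] XOR C[i-1]) with the IV playing the role of C[-1].  A
standalone sketch of just that recurrence (the block cipher is
abstracted behind a callback; the names are illustrative, not part of
the library):

    #include <stddef.h>
    #include <string.h>

    /* Encrypt one block of BLKSIZE bytes; OUT and IN may be the same.  */
    typedef void (*block_encrypt_fn) (void *ctx, unsigned char *out,
                                      const unsigned char *in);

    /* Plain CBC over NBLOCKS full blocks.  IV is updated so calls can
       be chained; OUT and IN may overlap, as in the real code.  */
    static void
    cbc_encrypt_blocks (block_encrypt_fn enc, void *ctx, size_t blksize,
                        unsigned char *iv, unsigned char *out,
                        const unsigned char *in, size_t nblocks)
    {
      size_t n, i;

      for (n = 0; n < nblocks; n++)
        {
          for (i = 0; i < blksize; i++)
            out[i] = in[i] ^ iv[i];      /* P[i] XOR C[i-1] */
          enc (ctx, out, out);           /* C[i] = E_k(...)  */
          memcpy (iv, out, blksize);     /* C[i] chains into the next block */
          in += blksize;
          out += blksize;
        }
    }

The CTS branch only rearranges the last two blocks so that the
ciphertext keeps the length of the unpadded plaintext.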
diff --git a/cipher/cipher-cfb.c b/cipher/cipher-cfb.c
new file mode 100644
index 0000000..f4152b9
--- /dev/null
+++ b/cipher/cipher-cfb.c
@@ -0,0 +1,215 @@
+/* cipher-cfb.c  - Generic CFB mode implementation
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
+ *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser general Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "g10lib.h"
+#include "cipher.h"
+#include "ath.h"
+#include "./cipher-internal.h"
+
+
+gcry_err_code_t
+_gcry_cipher_cfb_encrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned char *ivp;
+  size_t blocksize = c->cipher->blocksize;
+  size_t blocksize_x_2 = blocksize + blocksize;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if ( inbuflen <= c->unused )
+    {
+      /* Short enough to be encoded by the remaining XOR mask. */
+      /* XOR the input with the IV and store input into IV. */
+      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
+           inbuflen;
+           inbuflen--, c->unused-- )
+        *outbuf++ = (*ivp++ ^= *inbuf++);
+      return 0;
+    }
+
+  if ( c->unused )
+    {
+      /* XOR the input with the IV and store input into IV */
+      inbuflen -= c->unused;
+      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
+        *outbuf++ = (*ivp++ ^= *inbuf++);
+    }
+
+  /* Now we can process complete blocks.  We use a loop as long as we
+     have at least 2 blocks and use conditions for the rest.  This
+     also allows to use a bulk encryption function if available.  */
+  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_enc)
+    {
+      unsigned int nblocks = inbuflen / blocksize;
+      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
+      outbuf += nblocks * blocksize;
+      inbuf  += nblocks * blocksize;
+      inbuflen -= nblocks * blocksize;
+    }
+  else
+    {
+      while ( inbuflen >= blocksize_x_2 )
+        {
+          int i;
+          /* Encrypt the IV. */
+          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+          /* XOR the input with the IV and store input into IV.  */
+          for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+            *outbuf++ = (*ivp++ ^= *inbuf++);
+          inbuflen -= blocksize;
+        }
+    }
+
+  if ( inbuflen >= blocksize )
+    {
+      int i;
+      /* Save the current IV and then encrypt the IV. */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      /* XOR the input with the IV and store input into IV */
+      for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+        *outbuf++ = (*ivp++ ^= *inbuf++);
+      inbuflen -= blocksize;
+    }
+  if ( inbuflen )
+    {
+      /* Save the current IV and then encrypt the IV. */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      c->unused = blocksize;
+      /* Apply the XOR. */
+      c->unused -= inbuflen;
+      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
+        *outbuf++ = (*ivp++ ^= *inbuf++);
+    }
+  return 0;
+}
+
+
+gcry_err_code_t
+_gcry_cipher_cfb_decrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned char *ivp;
+  unsigned long temp;
+  int i;
+  size_t blocksize = c->cipher->blocksize;
+  size_t blocksize_x_2 = blocksize + blocksize;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if (inbuflen <= c->unused)
+    {
+      /* Short enough to be encoded by the remaining XOR mask. */
+      /* XOR the input with the IV and store input into IV. */
+      for (ivp=c->u_iv.iv+blocksize - c->unused;
+           inbuflen;
+           inbuflen--, c->unused--)
+        {
+          temp = *inbuf++;
+          *outbuf++ = *ivp ^ temp;
+          *ivp++ = temp;
+        }
+      return 0;
+    }
+
+  if (c->unused)
+    {
+      /* XOR the input with the IV and store input into IV. */
+      inbuflen -= c->unused;
+      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
+        {
+          temp = *inbuf++;
+          *outbuf++ = *ivp ^ temp;
+          *ivp++ = temp;
+        }
+    }
+
+  /* Now we can process complete blocks.  We use a loop as long as we
+     have at least 2 blocks and use conditions for the rest.  This
+     also allows to use a bulk encryption function if available.  */
+  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_dec)
+    {
+      unsigned int nblocks = inbuflen / blocksize;
+      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
+      outbuf += nblocks * blocksize;
+      inbuf  += nblocks * blocksize;
+      inbuflen -= nblocks * blocksize;
+    }
+  else
+    {
+      while (inbuflen >= blocksize_x_2 )
+        {
+          /* Encrypt the IV. */
+          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+          /* XOR the input with the IV and store input into IV. */
+          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+            {
+              temp = *inbuf++;
+              *outbuf++ = *ivp ^ temp;
+              *ivp++ = temp;
+            }
+          inbuflen -= blocksize;
+        }
+    }
+
+  if (inbuflen >= blocksize )
+    {
+      /* Save the current IV and then encrypt the IV. */
+      memcpy ( c->lastiv, c->u_iv.iv, blocksize);
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      /* XOR the input with the IV and store input into IV */
+      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+        {
+          temp = *inbuf++;
+          *outbuf++ = *ivp ^ temp;
+          *ivp++ = temp;
+        }
+      inbuflen -= blocksize;
+    }
+
+  if (inbuflen)
+    {
+      /* Save the current IV and then encrypt the IV. */
+      memcpy ( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      c->unused = blocksize;
+      /* Apply the XOR. */
+      c->unused -= inbuflen;
+      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
+        {
+          temp = *inbuf++;
+          *outbuf++ = *ivp ^ temp;
+          *ivp++ = temp;
+        }
+    }
+  return 0;
+}
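
The two routines above implement full-block CFB: the shift register in
c->u_iv.iv holds the previous ciphertext block, so encryption is
C[i] = P[i] XOR E_k(C[i-1]) and the `unused' counter merely tracks
leftover keystream bytes of a partial block.  Reduced to whole blocks
(callback and names illustrative):

    #include <stddef.h>

    typedef void (*block_encrypt_fn) (void *ctx, unsigned char *out,
                                      const unsigned char *in);

    /* Full-block CFB encryption.  The previous ciphertext block
       (initially the IV) is run through the block cipher and XORed
       into the plaintext; the ciphertext then becomes the new
       feedback value.  */
    static void
    cfb_encrypt_blocks (block_encrypt_fn enc, void *ctx, size_t blksize,
                        unsigned char *iv, unsigned char *out,
                        const unsigned char *in, size_t nblocks)
    {
      size_t n, i;

      for (n = 0; n < nblocks; n++)
        {
          enc (ctx, iv, iv);                  /* keystream = E_k(feedback) */
          for (i = 0; i < blksize; i++)
            iv[i] = out[i] = iv[i] ^ in[i];   /* ciphertext feeds back */
          in += blksize;
          out += blksize;
        }
    }

Decryption differs only in what is fed back: the received ciphertext
byte has to be saved before the XOR, which is what the `temp' variable
in _gcry_cipher_cfb_decrypt is for.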
diff --git a/cipher/cipher-ctr.c b/cipher/cipher-ctr.c
new file mode 100644
index 0000000..a334abc
--- /dev/null
+++ b/cipher/cipher-ctr.c
@@ -0,0 +1,106 @@
+/* cipher-ctr.c  - Generic CTR mode implementation
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
+ *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser general Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "g10lib.h"
+#include "cipher.h"
+#include "ath.h"
+#include "./cipher-internal.h"
+
+
+gcry_err_code_t
+_gcry_cipher_ctr_encrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned int n;
+  int i;
+  unsigned int blocksize = c->cipher->blocksize;
+  unsigned int nblocks;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  /* First process a left over encrypted counter.  */
+  if (c->unused)
+    {
+      gcry_assert (c->unused < blocksize);
+      i = blocksize - c->unused;
+      for (n=0; c->unused && n < inbuflen; c->unused--, n++, i++)
+        {
+          /* XOR input with encrypted counter and store in output.  */
+          outbuf[n] = inbuf[n] ^ c->lastiv[i];
+        }
+      inbuf  += n;
+      outbuf += n;
+      inbuflen -= n;
+    }
+
+
+  /* Use a bulk method if available.  */
+  nblocks = inbuflen / blocksize;
+  if (nblocks && c->bulk.ctr_enc)
+    {
+      c->bulk.ctr_enc (&c->context.c, c->u_ctr.ctr, outbuf, inbuf, nblocks);
+      inbuf  += nblocks * blocksize;
+      outbuf += nblocks * blocksize;
+      inbuflen -= nblocks * blocksize;
+    }
+
+  /* If we don't have a bulk method use the standard method.  We also
+     use this method for a remaining partial block.  */
+  if (inbuflen)
+    {
+      unsigned char tmp[MAX_BLOCKSIZE];
+
+      for (n=0; n < inbuflen; n++)
+        {
+          if ((n % blocksize) == 0)
+            {
+              c->cipher->encrypt (&c->context.c, tmp, c->u_ctr.ctr);
+
+              for (i = blocksize; i > 0; i--)
+                {
+                  c->u_ctr.ctr[i-1]++;
+                  if (c->u_ctr.ctr[i-1] != 0)
+                    break;
+                }
+            }
+
+          /* XOR input with encrypted counter and store in output.  */
+          outbuf[n] = inbuf[n] ^ tmp[n % blocksize];
+        }
+
+      /* Save the unused bytes of the counter.  */
+      n %= blocksize;
+      c->unused = (blocksize - n) % blocksize;
+      if (c->unused)
+        memcpy (c->lastiv+n, tmp+n, c->unused);
+
+      wipememory (tmp, sizeof tmp);
+    }
+
+  return 0;
+}
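
Only an encrypt routine is needed here: CTR is its own inverse, which is
why the old do_ctr_decrypt wrapper in cipher.c is dropped by this commit
(see the ChangeLog entry).  The per-block step, with the same big-endian
counter increment as above (callback and names illustrative):

    #include <stddef.h>

    typedef void (*block_encrypt_fn) (void *ctx, unsigned char *out,
                                      const unsigned char *in);

    /* CTR over NBLOCKS full blocks; CTR is incremented as a big-endian
       integer.  Decryption is the identical operation.  Assumes
       BLKSIZE <= 64 for the local keystream buffer.  */
    static void
    ctr_crypt_blocks (block_encrypt_fn enc, void *ctx, size_t blksize,
                      unsigned char *ctr, unsigned char *out,
                      const unsigned char *in, size_t nblocks)
    {
      unsigned char keystream[64];
      size_t n, i;

      for (n = 0; n < nblocks; n++)
        {
          enc (ctx, keystream, ctr);
          for (i = blksize; i > 0; i--)   /* ++ with carry, last byte first */
            if (++ctr[i-1])
              break;
          for (i = 0; i < blksize; i++)
            out[i] = in[i] ^ keystream[i];
          in += blksize;
          out += blksize;
        }
    }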
diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h
new file mode 100644
index 0000000..437e9c0
--- /dev/null
+++ b/cipher/cipher-internal.h
@@ -0,0 +1,181 @@
+/* cipher-internal.h  - Internal defs for cipher.c
+ * Copyright (C) 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser general Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef G10_CIPHER_INTERNAL_H
+#define G10_CIPHER_INTERNAL_H
+
+/* The maximum supported size of a block in bytes.  */
+#define MAX_BLOCKSIZE 16
+
+/* Magic values for the context structure.  */
+#define CTX_MAGIC_NORMAL 0x24091964
+#define CTX_MAGIC_SECURE 0x46919042
+
+/* Try to use 16 byte aligned cipher context for better performance.
+   We use the aligned attribute, thus it is only possible to implement
+   this with gcc.  */
+#undef NEED_16BYTE_ALIGNED_CONTEXT
+#if defined (__GNUC__)
+# define NEED_16BYTE_ALIGNED_CONTEXT 1
+#endif
+
+
+/* A VIA processor with the Padlock engine as well as the Intel AES_NI
+   instructions require an alignment of most data on a 16 byte
+   boundary.  Because we trick out the compiler while allocating the
+   context, the align attribute as used in rijndael.c does not work on
+   its own.  Thus we need to make sure that the entire context
+   structure is aligned on that boundary.  We achieve this by
+   defining a new type and use that instead of our usual alignment
+   type.  */
+typedef union
+{
+  PROPERLY_ALIGNED_TYPE foo;
+#ifdef NEED_16BYTE_ALIGNED_CONTEXT
+  char bar[16] __attribute__ ((aligned (16)));
+#endif
+  char c[1];
+} cipher_context_alignment_t;
+
+
+/* The handle structure.  */
+struct gcry_cipher_handle
+{
+  int magic;
+  size_t actual_handle_size;     /* Allocated size of this handle. */
+  size_t handle_offset;          /* Offset to the malloced block.  */
+  gcry_cipher_spec_t *cipher;
+  cipher_extra_spec_t *extraspec;
+  gcry_module_t module;
+
+  /* The algorithm id.  This is a hack required because the module
+     interface does not easily allow to retrieve this value. */
+  int algo;
+
+  /* A structure with function pointers for bulk operations.  Due to
+     limitations of the module system (we don't want to change the
+     API) we need to keep these function pointers here.  The cipher
+     open function initializes them and the actual encryption routines
+     use them if they are not NULL.  */
+  struct {
+    void (*cfb_enc)(void *context, unsigned char *iv,
+                    void *outbuf_arg, const void *inbuf_arg,
+                    unsigned int nblocks);
+    void (*cfb_dec)(void *context, unsigned char *iv,
+                    void *outbuf_arg, const void *inbuf_arg,
+                    unsigned int nblocks);
+    void (*cbc_enc)(void *context, unsigned char *iv,
+                    void *outbuf_arg, const void *inbuf_arg,
+                    unsigned int nblocks, int cbc_mac);
+    void (*cbc_dec)(void *context, unsigned char *iv,
+                    void *outbuf_arg, const void *inbuf_arg,
+                    unsigned int nblocks);
+    void (*ctr_enc)(void *context, unsigned char *iv,
+                    void *outbuf_arg, const void *inbuf_arg,
+                    unsigned int nblocks);
+  } bulk;
+
+
+  int mode;
+  unsigned int flags;
+
+  struct {
+    unsigned int key:1; /* Set to 1 if a key has been set.  */
+    unsigned int iv:1;  /* Set to 1 if an IV has been set.  */
+  } marks;
+
+  /* The initialization vector.  For best performance we make sure
+     that it is properly aligned.  In particular some implementations
+     of bulk operations expect a 16 byte aligned IV.  */
+  union {
+    cipher_context_alignment_t iv_align;
+    unsigned char iv[MAX_BLOCKSIZE];
+  } u_iv;
+
+  /* The counter for CTR mode.  This field is also used by AESWRAP and
+     thus we can't use the U_IV union.  */
+  union {
+    cipher_context_alignment_t iv_align;
+    unsigned char ctr[MAX_BLOCKSIZE];
+  } u_ctr;
+
+  /* Space to save an IV or CTR for chaining operations.  */
+  unsigned char lastiv[MAX_BLOCKSIZE];
+  int unused;  /* Number of unused bytes in LASTIV. */
+
+  /* What follows are two contexts of the cipher in use.  The first
+     one needs to be aligned well enough for the cipher operation
+     whereas the second one is a copy created by cipher_setkey and
+     used by cipher_reset.  That second copy has no need for proper
+     alignment because it is only accessed by memcpy.  */
+  cipher_context_alignment_t context;
+};
+
+
+/*-- cipher-cbc.c --*/
+gcry_err_code_t _gcry_cipher_cbc_encrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+gcry_err_code_t _gcry_cipher_cbc_decrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+
+/*-- cipher-cfb.c --*/
+gcry_err_code_t _gcry_cipher_cfb_encrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+gcry_err_code_t _gcry_cipher_cfb_decrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+
+
+/*-- cipher-ofb.c --*/
+gcry_err_code_t _gcry_cipher_ofb_encrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+gcry_err_code_t _gcry_cipher_ofb_decrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+
+/*-- cipher-ctr.c --*/
+gcry_err_code_t _gcry_cipher_ctr_encrypt
+/*           */ (gcry_cipher_hd_t c,
+                 unsigned char *outbuf, unsigned int outbuflen,
+                 const unsigned char *inbuf, unsigned int inbuflen);
+
+
+/*-- cipher-aeswrap.c --*/
+gcry_err_code_t _gcry_cipher_aeswrap_encrypt
+/*           */   (gcry_cipher_hd_t c,
+                   byte *outbuf, unsigned int outbuflen,
+                   const byte *inbuf, unsigned int inbuflen);
+gcry_err_code_t _gcry_cipher_aeswrap_decrypt
+/*           */   (gcry_cipher_hd_t c,
+                   byte *outbuf, unsigned int outbuflen,
+                   const byte *inbuf, unsigned int inbuflen);
+
+
+
+#endif /*G10_CIPHER_INTERNAL_H*/
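
The alignment machinery moves here unchanged from cipher.c.  The only
job of the `bar[16]' member is to force the whole union, and therefore
the context embedded at the end of the handle, onto a 16 byte boundary
for Padlock and AES-NI.  A tiny standalone check of the idea (GCC-style
attribute assumed; the type name is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as cipher_context_alignment_t: the aligned member
       forces the alignment of the whole union.  */
    typedef union
    {
      long foo;                    /* stand-in for PROPERLY_ALIGNED_TYPE */
    #if defined (__GNUC__)
      char bar[16] __attribute__ ((aligned (16)));
    #endif
      char c[1];
    } aligned_ctx_t;

    int
    main (void)
    {
      static aligned_ctx_t ctx;

      printf ("ctx at %p, 16 byte aligned: %s\n", (void *) &ctx,
              ((uintptr_t) &ctx & 15) ? "no" : "yes");
      return 0;
    }

For heap-allocated handles the attribute alone does not help, which is
what the comment about tricking out the compiler alludes to; the
handle_offset field records where the malloced block actually starts.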
diff --git a/cipher/cipher-ofb.c b/cipher/cipher-ofb.c
new file mode 100644
index 0000000..e5868cd
--- /dev/null
+++ b/cipher/cipher-ofb.c
@@ -0,0 +1,135 @@
+/* cipher-ofb.c  - Generic OFB mode implementation
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
+ *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser general Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "g10lib.h"
+#include "cipher.h"
+#include "ath.h"
+#include "./cipher-internal.h"
+
+
+gcry_err_code_t
+_gcry_cipher_ofb_encrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned char *ivp;
+  size_t blocksize = c->cipher->blocksize;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if ( inbuflen <= c->unused )
+    {
+      /* Short enough to be encoded by the remaining XOR mask. */
+      /* XOR the input with the IV */
+      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
+           inbuflen;
+           inbuflen--, c->unused-- )
+        *outbuf++ = (*ivp++ ^ *inbuf++);
+      return 0;
+    }
+
+  if( c->unused )
+    {
+      inbuflen -= c->unused;
+      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
+        *outbuf++ = (*ivp++ ^ *inbuf++);
+    }
+
+  /* Now we can process complete blocks. */
+  while ( inbuflen >= blocksize )
+    {
+      int i;
+      /* Encrypt the IV (and save the current one). */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+
+      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+        *outbuf++ = (*ivp++ ^ *inbuf++);
+      inbuflen -= blocksize;
+    }
+  if ( inbuflen )
+    { /* process the remaining bytes */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      c->unused = blocksize;
+      c->unused -= inbuflen;
+      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
+        *outbuf++ = (*ivp++ ^ *inbuf++);
+    }
+  return 0;
+}
+
+
+gcry_err_code_t
+_gcry_cipher_ofb_decrypt (gcry_cipher_hd_t c,
+                          unsigned char *outbuf, unsigned int outbuflen,
+                          const unsigned char *inbuf, unsigned int inbuflen)
+{
+  unsigned char *ivp;
+  size_t blocksize = c->cipher->blocksize;
+
+  if (outbuflen < inbuflen)
+    return GPG_ERR_BUFFER_TOO_SHORT;
+
+  if( inbuflen <= c->unused )
+    {
+      /* Short enough to be encoded by the remaining XOR mask. */
+      for (ivp=c->u_iv.iv+blocksize - c->unused; inbuflen; inbuflen--,c->unused--)
+        *outbuf++ = *ivp++ ^ *inbuf++;
+      return 0;
+    }
+
+  if ( c->unused )
+    {
+      inbuflen -= c->unused;
+      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
+        *outbuf++ = *ivp++ ^ *inbuf++;
+    }
+
+  /* Now we can process complete blocks. */
+  while ( inbuflen >= blocksize )
+    {
+      int i;
+      /* Encrypt the IV (and save the current one). */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
+        *outbuf++ = *ivp++ ^ *inbuf++;
+      inbuflen -= blocksize;
+    }
+  if ( inbuflen )
+    { /* Process the remaining bytes. */
+      /* Encrypt the IV (and save the current one). */
+      memcpy( c->lastiv, c->u_iv.iv, blocksize );
+      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
+      c->unused = blocksize;
+      c->unused -= inbuflen;
+      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
+        *outbuf++ = *ivp++ ^ *inbuf++;
+    }
+  return 0;
+}
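
Apart from the names, _gcry_cipher_ofb_encrypt and
_gcry_cipher_ofb_decrypt above perform the same computation: OFB
generates a keystream S[i] = E_k(S[i-1]) that never depends on the data,
so the XOR is its own inverse.  Reduced to whole blocks (callback and
names illustrative):

    #include <stddef.h>

    typedef void (*block_encrypt_fn) (void *ctx, unsigned char *out,
                                      const unsigned char *in);

    /* OFB over NBLOCKS full blocks.  The keystream depends only on the
       key and IV, so one routine serves encryption and decryption.  */
    static void
    ofb_crypt_blocks (block_encrypt_fn enc, void *ctx, size_t blksize,
                      unsigned char *iv, unsigned char *out,
                      const unsigned char *in, size_t nblocks)
    {
      size_t n, i;

      for (n = 0; n < nblocks; n++)
        {
          enc (ctx, iv, iv);              /* S[i] = E_k(S[i-1]) */
          for (i = 0; i < blksize; i++)
            out[i] = in[i] ^ iv[i];
          in += blksize;
          out += blksize;
        }
    }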
diff --git a/cipher/cipher.c b/cipher/cipher.c
index b99ab41..028085e 100644
--- a/cipher/cipher.c
+++ b/cipher/cipher.c
@@ -27,19 +27,7 @@
 #include "g10lib.h"
 #include "cipher.h"
 #include "ath.h"
-
-#define MAX_BLOCKSIZE 16
-#define TABLE_SIZE 14
-#define CTX_MAGIC_NORMAL 0x24091964
-#define CTX_MAGIC_SECURE 0x46919042
-
-/* Try to use 16 byte aligned cipher context for better performance.
-   We use the aligned attribute, thus it is only possible to implement
-   this with gcc.  */
-#undef NEED_16BYTE_ALIGNED_CONTEXT
-#if defined (__GNUC__)
-# define NEED_16BYTE_ALIGNED_CONTEXT 1
-#endif
+#include "./cipher-internal.h"
 
 /* A dummy extraspec so that we do not need to tests the extraspec
    field from the module specification against NULL and instead
@@ -140,98 +128,6 @@ static int default_ciphers_registered;
   while (0)
 
 
-/* A VIA processor with the Padlock engine as well as the Intel AES_NI
-   instructions require an alignment of most data on a 16 byte
-   boundary.  Because we trick out the compiler while allocating the
-   context, the align attribute as used in rijndael.c does not work on
-   its own.  Thus we need to make sure that the entire context
-   structure is a aligned on that boundary.  We achieve this by
-   defining a new type and use that instead of our usual alignment
-   type.  */
-typedef union
-{
-  PROPERLY_ALIGNED_TYPE foo;
-#ifdef NEED_16BYTE_ALIGNED_CONTEXT
-  char bar[16] __attribute__ ((aligned (16)));
-#endif
-  char c[1];
-} cipher_context_alignment_t;
-
-
-/* The handle structure.  */
-struct gcry_cipher_handle
-{
-  int magic;
-  size_t actual_handle_size;     /* Allocated size of this handle. */
-  size_t handle_offset;          /* Offset to the malloced block.  */
-  gcry_cipher_spec_t *cipher;
-  cipher_extra_spec_t *extraspec;
-  gcry_module_t module;
-
-  /* The algorithm id.  This is a hack required because the module
-     interface does not easily allow to retrieve this value. */
-  int algo;
-
-  /* A structure with function pointers for bulk operations.  Due to
-     limitations of the module system (we don't want to change the
-     API) we need to keep these function pointers here.  The cipher
-     open function intializes them and the actual encryption routines
-     use them if they are not NULL.  */
-  struct {
-    void (*cfb_enc)(void *context, unsigned char *iv,
-                    void *outbuf_arg, const void *inbuf_arg,
-                    unsigned int nblocks);
-    void (*cfb_dec)(void *context, unsigned char *iv,
-                    void *outbuf_arg, const void *inbuf_arg,
-                    unsigned int nblocks);
-    void (*cbc_enc)(void *context, unsigned char *iv,
-                    void *outbuf_arg, const void *inbuf_arg,
-                    unsigned int nblocks, int cbc_mac);
-    void (*cbc_dec)(void *context, unsigned char *iv,
-                    void *outbuf_arg, const void *inbuf_arg,
-                    unsigned int nblocks);
-    void (*ctr_enc)(void *context, unsigned char *iv,
-                    void *outbuf_arg, const void *inbuf_arg,
-                    unsigned int nblocks);
-  } bulk;
-
-
-  int mode;
-  unsigned int flags;
-
-  struct {
-    unsigned int key:1; /* Set to 1 if a key has been set.  */
-    unsigned int iv:1;  /* Set to 1 if a IV has been set.  */
-  } marks;
-
-  /* The initialization vector.  For best performance we make sure
-     that it is properly aligned.  In particular some implementations
-     of bulk operations expect an 16 byte aligned IV.  */
-  union {
-    cipher_context_alignment_t iv_align;
-    unsigned char iv[MAX_BLOCKSIZE];
-  } u_iv;
-
-  /* The counter for CTR mode.  This field is also used by AESWRAP and
-     thus we can't use the U_IV union.  */
-  union {
-    cipher_context_alignment_t iv_align;
-    unsigned char ctr[MAX_BLOCKSIZE];
-  } u_ctr;
-
-  /* Space to save an IV or CTR for chaining operations.  */
-  unsigned char lastiv[MAX_BLOCKSIZE];
-  int unused;  /* Number of unused bytes in LASTIV. */
-
-  /* What follows are two contexts of the cipher in use.  The first
-     one needs to be aligned well enough for the cipher operation
-     whereas the second one is a copy created by cipher_setkey and
-     used by cipher_reset.  That second copy has no need for proper
-     aligment because it is only accessed by memcpy.  */
-  cipher_context_alignment_t context;
-};
-
-
 
 /* These dummy functions are used in case a cipher implementation
    refuses to provide its own functions.  */
@@ -991,700 +887,6 @@ do_ecb_decrypt (gcry_cipher_hd_t c,
 }
 
 
-static gcry_err_code_t
-do_cbc_encrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned int n;
-  unsigned char *ivp;
-  int i;
-  size_t blocksize = c->cipher->blocksize;
-  unsigned nblocks = inbuflen / blocksize;
-
-  if (outbuflen < ((c->flags & GCRY_CIPHER_CBC_MAC)? blocksize : inbuflen))
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if ((inbuflen % c->cipher->blocksize)
-      && !(inbuflen > c->cipher->blocksize
-           && (c->flags & GCRY_CIPHER_CBC_CTS)))
-    return GPG_ERR_INV_LENGTH;
-
-  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
-    {
-      if ((inbuflen % blocksize) == 0)
-	nblocks--;
-    }
-
-  if (c->bulk.cbc_enc)
-    {
-      c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks,
-                       (c->flags & GCRY_CIPHER_CBC_MAC));
-      inbuf  += nblocks * blocksize;
-      if (!(c->flags & GCRY_CIPHER_CBC_MAC))
-        outbuf += nblocks * blocksize;
-    }
-  else
-    {
-      for (n=0; n < nblocks; n++ )
-        {
-          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-            outbuf[i] = inbuf[i] ^ *ivp++;
-          c->cipher->encrypt ( &c->context.c, outbuf, outbuf );
-          memcpy (c->u_iv.iv, outbuf, blocksize );
-          inbuf  += blocksize;
-          if (!(c->flags & GCRY_CIPHER_CBC_MAC))
-            outbuf += blocksize;
-        }
-    }
-
-  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
-    {
-      /* We have to be careful here, since outbuf might be equal to
-         inbuf.  */
-      int restbytes;
-      unsigned char b;
-
-      if ((inbuflen % blocksize) == 0)
-        restbytes = blocksize;
-      else
-        restbytes = inbuflen % blocksize;
-
-      outbuf -= blocksize;
-      for (ivp = c->u_iv.iv, i = 0; i < restbytes; i++)
-        {
-          b = inbuf[i];
-          outbuf[blocksize + i] = outbuf[i];
-          outbuf[i] = b ^ *ivp++;
-        }
-      for (; i < blocksize; i++)
-        outbuf[i] = 0 ^ *ivp++;
-
-      c->cipher->encrypt (&c->context.c, outbuf, outbuf);
-      memcpy (c->u_iv.iv, outbuf, blocksize);
-    }
-
-  return 0;
-}
-
-
-static gcry_err_code_t
-do_cbc_decrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned int n;
-  unsigned char *ivp;
-  int i;
-  size_t blocksize = c->cipher->blocksize;
-  unsigned int nblocks = inbuflen / blocksize;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if ((inbuflen % c->cipher->blocksize)
-      && !(inbuflen > c->cipher->blocksize
-           && (c->flags & GCRY_CIPHER_CBC_CTS)))
-    return GPG_ERR_INV_LENGTH;
-
-  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
-    {
-      nblocks--;
-      if ((inbuflen % blocksize) == 0)
-	nblocks--;
-      memcpy (c->lastiv, c->u_iv.iv, blocksize);
-    }
-
-  if (c->bulk.cbc_dec)
-    {
-      c->bulk.cbc_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
-      inbuf  += nblocks * blocksize;
-      outbuf += nblocks * blocksize;
-    }
-  else
-    {
-      for (n=0; n < nblocks; n++ )
-        {
-          /* Because outbuf and inbuf might be the same, we have to
-           * save the original ciphertext block.  We use LASTIV for
-           * this here because it is not used otherwise. */
-          memcpy (c->lastiv, inbuf, blocksize);
-          c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
-          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-	    outbuf[i] ^= *ivp++;
-          memcpy(c->u_iv.iv, c->lastiv, blocksize );
-          inbuf  += c->cipher->blocksize;
-          outbuf += c->cipher->blocksize;
-        }
-    }
-
-  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
-    {
-      int restbytes;
-
-      if ((inbuflen % blocksize) == 0)
-        restbytes = blocksize;
-      else
-        restbytes = inbuflen % blocksize;
-
-      memcpy (c->lastiv, c->u_iv.iv, blocksize );         /* Save Cn-2. */
-      memcpy (c->u_iv.iv, inbuf + blocksize, restbytes ); /* Save Cn. */
-
-      c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
-      for (ivp=c->u_iv.iv,i=0; i < restbytes; i++ )
-        outbuf[i] ^= *ivp++;
-
-      memcpy(outbuf + blocksize, outbuf, restbytes);
-      for(i=restbytes; i < blocksize; i++)
-        c->u_iv.iv[i] = outbuf[i];
-      c->cipher->decrypt (&c->context.c, outbuf, c->u_iv.iv);
-      for(ivp=c->lastiv,i=0; i < blocksize; i++ )
-        outbuf[i] ^= *ivp++;
-      /* c->lastiv is now really lastlastiv, does this matter? */
-    }
-
-  return 0;
-}
-
-
-static gcry_err_code_t
-do_cfb_encrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned char *ivp;
-  size_t blocksize = c->cipher->blocksize;
-  size_t blocksize_x_2 = blocksize + blocksize;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if ( inbuflen <= c->unused )
-    {
-      /* Short enough to be encoded by the remaining XOR mask. */
-      /* XOR the input with the IV and store input into IV. */
-      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
-           inbuflen;
-           inbuflen--, c->unused-- )
-        *outbuf++ = (*ivp++ ^= *inbuf++);
-      return 0;
-    }
-
-  if ( c->unused )
-    {
-      /* XOR the input with the IV and store input into IV */
-      inbuflen -= c->unused;
-      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
-        *outbuf++ = (*ivp++ ^= *inbuf++);
-    }
-
-  /* Now we can process complete blocks.  We use a loop as long as we
-     have at least 2 blocks and use conditions for the rest.  This
-     also allows to use a bulk encryption function if available.  */
-  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_enc)
-    {
-      unsigned int nblocks = inbuflen / blocksize;
-      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
-      outbuf += nblocks * blocksize;
-      inbuf  += nblocks * blocksize;
-      inbuflen -= nblocks * blocksize;
-    }
-  else
-    {
-      while ( inbuflen >= blocksize_x_2 )
-        {
-          int i;
-          /* Encrypt the IV. */
-          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-          /* XOR the input with the IV and store input into IV.  */
-          for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-            *outbuf++ = (*ivp++ ^= *inbuf++);
-          inbuflen -= blocksize;
-        }
-    }
-
-  if ( inbuflen >= blocksize )
-    {
-      int i;
-      /* Save the current IV and then encrypt the IV. */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      /* XOR the input with the IV and store input into IV */
-      for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-        *outbuf++ = (*ivp++ ^= *inbuf++);
-      inbuflen -= blocksize;
-    }
-  if ( inbuflen )
-    {
-      /* Save the current IV and then encrypt the IV. */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      c->unused = blocksize;
-      /* Apply the XOR. */
-      c->unused -= inbuflen;
-      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
-        *outbuf++ = (*ivp++ ^= *inbuf++);
-    }
-  return 0;
-}
-
-
-static gcry_err_code_t
-do_cfb_decrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned char *ivp;
-  unsigned long temp;
-  int i;
-  size_t blocksize = c->cipher->blocksize;
-  size_t blocksize_x_2 = blocksize + blocksize;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if (inbuflen <= c->unused)
-    {
-      /* Short enough to be encoded by the remaining XOR mask. */
-      /* XOR the input with the IV and store input into IV. */
-      for (ivp=c->u_iv.iv+blocksize - c->unused;
-           inbuflen;
-           inbuflen--, c->unused--)
-        {
-          temp = *inbuf++;
-          *outbuf++ = *ivp ^ temp;
-          *ivp++ = temp;
-        }
-      return 0;
-    }
-
-  if (c->unused)
-    {
-      /* XOR the input with the IV and store input into IV. */
-      inbuflen -= c->unused;
-      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
-        {
-          temp = *inbuf++;
-          *outbuf++ = *ivp ^ temp;
-          *ivp++ = temp;
-        }
-    }
-
-  /* Now we can process complete blocks.  We use a loop as long as we
-     have at least 2 blocks and use conditions for the rest.  This
-     also allows to use a bulk encryption function if available.  */
-  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_dec)
-    {
-      unsigned int nblocks = inbuflen / blocksize;
-      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
-      outbuf += nblocks * blocksize;
-      inbuf  += nblocks * blocksize;
-      inbuflen -= nblocks * blocksize;
-    }
-  else
-    {
-      while (inbuflen >= blocksize_x_2 )
-        {
-          /* Encrypt the IV. */
-          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-          /* XOR the input with the IV and store input into IV. */
-          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-            {
-              temp = *inbuf++;
-              *outbuf++ = *ivp ^ temp;
-              *ivp++ = temp;
-            }
-          inbuflen -= blocksize;
-        }
-    }
-
-  if (inbuflen >= blocksize )
-    {
-      /* Save the current IV and then encrypt the IV. */
-      memcpy ( c->lastiv, c->u_iv.iv, blocksize);
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      /* XOR the input with the IV and store input into IV */
-      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-        {
-          temp = *inbuf++;
-          *outbuf++ = *ivp ^ temp;
-          *ivp++ = temp;
-        }
-      inbuflen -= blocksize;
-    }
-
-  if (inbuflen)
-    {
-      /* Save the current IV and then encrypt the IV. */
-      memcpy ( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      c->unused = blocksize;
-      /* Apply the XOR. */
-      c->unused -= inbuflen;
-      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
-        {
-          temp = *inbuf++;
-          *outbuf++ = *ivp ^ temp;
-          *ivp++ = temp;
-        }
-    }
-  return 0;
-}
-
-
-static gcry_err_code_t
-do_ofb_encrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned char *ivp;
-  size_t blocksize = c->cipher->blocksize;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if ( inbuflen <= c->unused )
-    {
-      /* Short enough to be encoded by the remaining XOR mask. */
-      /* XOR the input with the IV */
-      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
-           inbuflen;
-           inbuflen--, c->unused-- )
-        *outbuf++ = (*ivp++ ^ *inbuf++);
-      return 0;
-    }
-
-  if( c->unused )
-    {
-      inbuflen -= c->unused;
-      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
-        *outbuf++ = (*ivp++ ^ *inbuf++);
-    }
-
-  /* Now we can process complete blocks. */
-  while ( inbuflen >= blocksize )
-    {
-      int i;
-      /* Encrypt the IV (and save the current one). */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-
-      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-        *outbuf++ = (*ivp++ ^ *inbuf++);
-      inbuflen -= blocksize;
-    }
-  if ( inbuflen )
-    { /* process the remaining bytes */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      c->unused = blocksize;
-      c->unused -= inbuflen;
-      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
-        *outbuf++ = (*ivp++ ^ *inbuf++);
-    }
-  return 0;
-}
-
-static gcry_err_code_t
-do_ofb_decrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned char *ivp;
-  size_t blocksize = c->cipher->blocksize;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  if( inbuflen <= c->unused )
-    {
-      /* Short enough to be encoded by the remaining XOR mask. */
-      for (ivp=c->u_iv.iv+blocksize - c->unused; inbuflen; inbuflen--,c->unused--)
-        *outbuf++ = *ivp++ ^ *inbuf++;
-      return 0;
-    }
-
-  if ( c->unused )
-    {
-      inbuflen -= c->unused;
-      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
-        *outbuf++ = *ivp++ ^ *inbuf++;
-    }
-
-  /* Now we can process complete blocks. */
-  while ( inbuflen >= blocksize )
-    {
-      int i;
-      /* Encrypt the IV (and save the current one). */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
-        *outbuf++ = *ivp++ ^ *inbuf++;
-      inbuflen -= blocksize;
-    }
-  if ( inbuflen )
-    { /* Process the remaining bytes. */
-      /* Encrypt the IV (and save the current one). */
-      memcpy( c->lastiv, c->u_iv.iv, blocksize );
-      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
-      c->unused = blocksize;
-      c->unused -= inbuflen;
-      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
-        *outbuf++ = *ivp++ ^ *inbuf++;
-    }
-  return 0;
-}
-
-
-static gcry_err_code_t
-do_ctr_encrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  unsigned int n;
-  int i;
-  unsigned int blocksize = c->cipher->blocksize;
-  unsigned int nblocks;
-
-  if (outbuflen < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-
-  /* First process a left over encrypted counter.  */
-  if (c->unused)
-    {
-      gcry_assert (c->unused < blocksize);
-      i = blocksize - c->unused;
-      for (n=0; c->unused && n < inbuflen; c->unused--, n++, i++)
-        {
-          /* XOR input with encrypted counter and store in output.  */
-          outbuf[n] = inbuf[n] ^ c->lastiv[i];
-        }
-      inbuf  += n;
-      outbuf += n;
-      inbuflen -= n;
-    }
-
-
-  /* Use a bulk method if available.  */
-  nblocks = inbuflen / blocksize;
-  if (nblocks && c->bulk.ctr_enc)
-    {
-      c->bulk.ctr_enc (&c->context.c, c->u_ctr.ctr, outbuf, inbuf, nblocks);
-      inbuf  += nblocks * blocksize;
-      outbuf += nblocks * blocksize;
-      inbuflen -= nblocks * blocksize;
-    }
-
-  /* If we don't have a bulk method use the standard method.  We also
-     use this method for the remaining partial block.  */
-  if (inbuflen)
-    {
-      unsigned char tmp[MAX_BLOCKSIZE];
-
-      for (n=0; n < inbuflen; n++)
-        {
-          if ((n % blocksize) == 0)
-            {
-              c->cipher->encrypt (&c->context.c, tmp, c->u_ctr.ctr);
-
-              for (i = blocksize; i > 0; i--)
-                {
-                  c->u_ctr.ctr[i-1]++;
-                  if (c->u_ctr.ctr[i-1] != 0)
-                    break;
-                }
-            }
-
-          /* XOR input with encrypted counter and store in output.  */
-          outbuf[n] = inbuf[n] ^ tmp[n % blocksize];
-        }
-
-      /* Save the unused bytes of the counter.  */
-      n %= blocksize;
-      c->unused = (blocksize - n) % blocksize;
-      if (c->unused)
-        memcpy (c->lastiv+n, tmp+n, c->unused);
-
-      wipememory (tmp, sizeof tmp);
-    }
-
-  return 0;
-}
-
-static gcry_err_code_t
-do_ctr_decrypt (gcry_cipher_hd_t c,
-                unsigned char *outbuf, unsigned int outbuflen,
-                const unsigned char *inbuf, unsigned int inbuflen)
-{
-  return do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
-}
-
-
-/* Perform the AES-Wrap algorithm as specified by RFC3394.  We
-   implement this as a mode usable with any cipher algorithm of
-   blocksize 128.  */
-static gcry_err_code_t
-do_aeswrap_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
-                    const byte *inbuf, unsigned int inbuflen )
-{
-  int j, x;
-  unsigned int n, i;
-  unsigned char *r, *a, *b;
-  unsigned char t[8];
-
-#if MAX_BLOCKSIZE < 8
-#error Invalid block size
-#endif
-  /* We require a cipher with a 128 bit block length.  */
-  if (c->cipher->blocksize != 16)
-    return GPG_ERR_INV_LENGTH;
-
-  /* The output buffer must be able to hold the input data plus one
-     additional block.  */
-  if (outbuflen < inbuflen + 8)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-  /* Input data must be multiple of 64 bits.  */
-  if (inbuflen % 8)
-    return GPG_ERR_INV_ARG;
-
-  n = inbuflen / 8;
-
-  /* We need at least two 64 bit blocks.  */
-  if (n < 2)
-    return GPG_ERR_INV_ARG;
-
-  r = outbuf;
-  a = outbuf;  /* We store A directly in OUTBUF.  */
-  b = c->u_ctr.ctr;  /* B is also used to concatenate stuff.  */
-
-  /* If an IV has been set we use that IV as the Alternative Initial
-     Value; if it has not been set we use the standard value.  */
-  if (c->marks.iv)
-    memcpy (a, c->u_iv.iv, 8);
-  else
-    memset (a, 0xa6, 8);
-
-  /* Copy the inbuf to the outbuf. */
-  memmove (r+8, inbuf, inbuflen);
-
-  memset (t, 0, sizeof t); /* t := 0.  */
-
-  for (j = 0; j <= 5; j++)
-    {
-      for (i = 1; i <= n; i++)
-        {
-          /* B := AES_k( A | R[i] ) */
-          memcpy (b, a, 8);
-          memcpy (b+8, r+i*8, 8);
-          c->cipher->encrypt (&c->context.c, b, b);
-          /* t := t + 1  */
-	  for (x = 7; x >= 0; x--)
-	    {
-	      t[x]++;
-	      if (t[x])
-		break;
-	    }
-          /* A := MSB_64(B) ^ t */
-          for (x=0; x < 8; x++)
-            a[x] = b[x] ^ t[x];
-          /* R[i] := LSB_64(B) */
-          memcpy (r+i*8, b+8, 8);
-        }
-   }
-
-  return 0;
-}
-
-/* Perform the AES-Unwrap algorithm as specified by RFC3394.  We
-   implement this as a mode usable with any cipher algorithm of
-   blocksize 128.  */
-static gcry_err_code_t
-do_aeswrap_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
-                    const byte *inbuf, unsigned int inbuflen)
-{
-  int j, x;
-  unsigned int n, i;
-  unsigned char *r, *a, *b;
-  unsigned char t[8];
-
-#if MAX_BLOCKSIZE < 8
-#error Invalid block size
-#endif
-  /* We require a cipher with a 128 bit block length.  */
-  if (c->cipher->blocksize != 16)
-    return GPG_ERR_INV_LENGTH;
-
-  /* The output buffer must be able to hold the input data minus one
-     64 bit block.  Fixme: The caller has more restrictive checks
-     - we may want to fix them for this mode.  */
-  if (outbuflen + 8  < inbuflen)
-    return GPG_ERR_BUFFER_TOO_SHORT;
-  /* Input data must be multiple of 64 bits.  */
-  if (inbuflen % 8)
-    return GPG_ERR_INV_ARG;
-
-  n = inbuflen / 8;
-
-  /* We need at least three 64 bit blocks.  */
-  if (n < 3)
-    return GPG_ERR_INV_ARG;
-
-  r = outbuf;
-  a = c->lastiv;  /* We use c->LASTIV as buffer for A.  */
-  b = c->u_ctr.ctr;     /* B is also used to concatenate stuff.  */
-
-  /* Copy the inbuf to the outbuf and save A. */
-  memcpy (a, inbuf, 8);
-  memmove (r, inbuf+8, inbuflen-8);
-  n--; /* Reduce to actual number of data blocks.  */
-
-  /* t := 6 * n  */
-  i = n * 6;  /* The range is valid because: n = inbuflen / 8 - 1.  */
-  for (x=0; x < 8 && x < sizeof (i); x++)
-    t[7-x] = i >> (8*x);
-  for (; x < 8; x++)
-    t[7-x] = 0;
-
-  for (j = 5; j >= 0; j--)
-    {
-      for (i = n; i >= 1; i--)
-        {
-          /* B := AES_k^-1( (A ^ t) | R[i] ) */
-          for (x = 0; x < 8; x++)
-            b[x] = a[x] ^ t[x];
-          memcpy (b+8, r+(i-1)*8, 8);
-          c->cipher->decrypt (&c->context.c, b, b);
-          /* t := t - 1  */
-	  for (x = 7; x >= 0; x--)
-	    {
-	      t[x]--;
-	      if (t[x] != 0xff)
-		break;
-	    }
-          /* A := MSB_64(B) */
-          memcpy (a, b, 8);
-          /* R[i] := LSB_64(B) */
-          memcpy (r+(i-1)*8, b+8, 8);
-        }
-   }
-
-  /* If an IV has been set we compare against this Alternative Initial
-     Value; if it has not been set we compare against the standard IV.  */
-  if (c->marks.iv)
-    j = memcmp (a, c->u_iv.iv, 8);
-  else
-    {
-      for (j=0, x=0; x < 8; x++)
-        if (a[x] != 0xa6)
-          {
-            j=1;
-            break;
-          }
-    }
-  return j? GPG_ERR_CHECKSUM : 0;
-}
-
-
 /****************
  * Encrypt INBUF to OUTBUF with the mode selected at open.
  * inbuf and outbuf may overlap or be the same.
@@ -1703,23 +905,24 @@ cipher_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
       break;
 
     case GCRY_CIPHER_MODE_CBC:
-      rc = do_cbc_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_cbc_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_CFB:
-      rc = do_cfb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_cfb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_OFB:
-      rc = do_ofb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_ofb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_CTR:
-      rc = do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_AESWRAP:
-      rc = do_aeswrap_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_aeswrap_encrypt (c, outbuf, outbuflen,
+                                         inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_STREAM:
@@ -1795,23 +998,24 @@ cipher_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
       break;
 
     case GCRY_CIPHER_MODE_CBC:
-      rc = do_cbc_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_cbc_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_CFB:
-      rc = do_cfb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_cfb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_OFB:
-      rc = do_ofb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_ofb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_CTR:
-      rc = do_ctr_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_AESWRAP:
-      rc = do_aeswrap_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
+      rc = _gcry_cipher_aeswrap_decrypt (c, outbuf, outbuflen,
+                                         inbuf, inbuflen);
       break;
 
     case GCRY_CIPHER_MODE_STREAM:

-----------------------------------------------------------------------
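
The CTR hunk above never runs the block cipher over the data itself: it
encrypts a counter block, XORs the result into the input, and then bumps
the counter as a big-endian integer.  A standalone sketch of just that
increment-and-XOR step follows; the helper names are hypothetical and not
part of the tree.

#include <stddef.h>

/* Big-endian counter increment, as in the do_ctr_encrypt loop above:
   bump the last byte of CTR and let any carry ripple towards the most
   significant byte.  */
static void
ctr_increment (unsigned char *ctr, size_t blocksize)
{
  size_t i;

  for (i = blocksize; i > 0; i--)
    {
      ctr[i-1]++;
      if (ctr[i-1] != 0)  /* No carry left - stop.  */
        break;
    }
}

/* XOR NBYTES of the encrypted counter block KSTREAM into INBUF,
   producing OUTBUF; this is the only place the plaintext is touched.  */
static void
ctr_xor (unsigned char *outbuf, const unsigned char *inbuf,
         const unsigned char *kstream, size_t nbytes)
{
  size_t n;

  for (n = 0; n < nbytes; n++)
    outbuf[n] = inbuf[n] ^ kstream[n];
}
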

Summary of changes:
 cipher/ChangeLog         |   14 +
 cipher/Makefile.am       |    4 +-
 cipher/cipher-aeswrap.c  |  196 +++++++++++
 cipher/cipher-cbc.c      |  187 +++++++++++
 cipher/cipher-cfb.c      |  215 ++++++++++++
 cipher/cipher-ctr.c      |  106 ++++++
 cipher/cipher-internal.h |  181 ++++++++++
 cipher/cipher-ofb.c      |  135 ++++++++
 cipher/cipher.c          |  822 +---------------------------------------------
 9 files changed, 1050 insertions(+), 810 deletions(-)
 create mode 100644 cipher/cipher-aeswrap.c
 create mode 100644 cipher/cipher-cbc.c
 create mode 100644 cipher/cipher-cfb.c
 create mode 100644 cipher/cipher-ctr.c
 create mode 100644 cipher/cipher-internal.h
 create mode 100644 cipher/cipher-ofb.c
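
The refactoring is purely internal: the static do_* helpers become the
_gcry_cipher_* entry points in the new cipher-*.c files and the dispatch in
cipher_encrypt/cipher_decrypt is updated accordingly, so callers of the
public API are unaffected.  As a rough illustration only -- placeholder key
and data, no secure-memory setup -- wrapping a key with the AESWRAP mode
factored out above still looks like this:

#include <stdio.h>
#include <gcrypt.h>

int
main (void)
{
  gcry_cipher_hd_t hd;
  gcry_error_t err;
  const unsigned char kek[16]  = { 0 };  /* Placeholder key-encryption key.   */
  const unsigned char data[16] = { 0 };  /* Placeholder key material to wrap. */
  unsigned char wrapped[16 + 8];         /* RFC 3394: output is input + 8.    */

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;

  err = gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_AESWRAP, 0);
  if (err)
    {
      fprintf (stderr, "open failed: %s\n", gcry_strerror (err));
      return 1;
    }
  if (!(err = gcry_cipher_setkey (hd, kek, sizeof kek)))
    err = gcry_cipher_encrypt (hd, wrapped, sizeof wrapped, data, sizeof data);
  if (err)
    fprintf (stderr, "wrap failed: %s\n", gcry_strerror (err));
  gcry_cipher_close (hd);
  return err ? 1 : 0;
}
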


hooks/post-receive
-- 
The GNU crypto library
http://git.gnupg.org