[PATCH 3/4] rijndael: refactor to reduce number of #ifdefs and branches
Jussi Kivilinna
jussi.kivilinna at iki.fi
Mon Dec 1 20:12:52 CET 2014
* cipher/rijndael-aesni.c (_gcry_aes_aesni_encrypt)
(_gcry_aes_aesni_decrypt): Make functions return stack burn depth.
* cipher/rijndael-amd64.S (_gcry_aes_amd64_encrypt_block)
(_gcry_aes_amd64_decrypt_block): Ditto.
* cipher/rijndael-arm.S (_gcry_aes_arm_encrypt_block)
(_gcry_aes_arm_decrypt_block): Ditto.
* cipher/rijndael-internal.h (RIJNDAEL_context_s)
(rijndael_cryptfn_t): New.
(RIJNDAEL_context): New members 'encrypt_fn' and 'decrypt_fn'.
* cipher/rijndael.c (_gcry_aes_amd64_encrypt_block)
(_gcry_aes_amd64_decrypt_block, _gcry_aes_aesni_encrypt)
(_gcry_aes_aesni_decrypt, _gcry_aes_arm_encrypt_block)
(_gcry_aes_arm_decrypt_block): Change prototypes.
(do_padlock_encrypt, do_padlock_decrypt): New.
(do_setkey): Separate key-length-to-rounds conversion from the
HW-features check; Add selection of ctx->encrypt_fn and
ctx->decrypt_fn.
(do_encrypt_aligned, do_decrypt_aligned): Move inside
'[!USE_AMD64_ASM && !USE_ARM_ASM]'; Move USE_AMD64_ASM and
USE_ARM_ASM to...
(do_encrypt, do_decrypt): ...here; Return stack burn depth; Remove the
second temporary buffer from the non-aligned input/output case.
(do_padlock): Move decrypt_flag to the last argument; Return stack
burn depth.
(rijndael_encrypt): Remove #ifdefs, just call ctx->encrypt_fn.
(_gcry_aes_cfb_enc, _gcry_aes_cbc_enc): Remove USE_PADLOCK; Call
ctx->encrypt_fn in place of do_encrypt/do_encrypt_aligned.
(_gcry_aes_ctr_enc): Call ctx->encrypt_fn in place of
do_encrypt_aligned; Make tmp buffer 16-byte aligned and wipe buffer
after use.
(rijndael_decrypt): Remove #ifdefs, just call ctx->decrypt_fn.
(_gcry_aes_cfb_dec): Remove USE_PADLOCK; Call ctx->decrypt_fn in place
of do_decrypt/do_decrypt_aligned.
(_gcry_aes_cbc_dec): Ditto; Make savebuf buffer 16-byte aligned.
--
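This refactoring replaces the per-call "if (0) ... #ifdef ... else"
chains with a pair of function pointers chosen once in do_setkey().
Each block function now also returns the stack burn depth it needs,
so callers burn exactly as much stack as the selected implementation
actually used. A minimal sketch of the pattern, with hypothetical
names (cryptfn_t, cipher_ctx, setkey) standing in for the real
libgcrypt types:

  #include <string.h>

  struct cipher_ctx;

  /* Each implementation reports the stack burn depth it requires;
     0 means no sensitive data spilled to the stack.  */
  typedef unsigned int (*cryptfn_t)(const struct cipher_ctx *ctx,
                                    unsigned char *dst,
                                    const unsigned char *src);

  struct cipher_ctx
  {
    cryptfn_t encrypt_fn;
    cryptfn_t decrypt_fn;
  };

  static unsigned int
  encrypt_generic (const struct cipher_ctx *ctx,
                   unsigned char *dst, const unsigned char *src)
  {
    (void)ctx;
    memcpy (dst, src, 16);          /* stand-in for the real cipher */
    return 56 + 2 * sizeof (int);   /* worst-case stack usage */
  }

  static unsigned int
  encrypt_hw (const struct cipher_ctx *ctx,
              unsigned char *dst, const unsigned char *src)
  {
    (void)ctx;
    memcpy (dst, src, 16);          /* state stays in registers */
    return 0;                       /* nothing to burn */
  }

  static void
  setkey (struct cipher_ctx *ctx, int have_hw_aes)
  {
    /* Decide once at key setup instead of on every call.  */
    ctx->encrypt_fn = have_hw_aes ? encrypt_hw : encrypt_generic;
    ctx->decrypt_fn = ctx->encrypt_fn;   /* demo only */
  }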
Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
cipher/rijndael-aesni.c | 10 +
cipher/rijndael-amd64.S | 2
cipher/rijndael-arm.S | 3
cipher/rijndael-internal.h | 11 +
cipher/rijndael.c | 361 ++++++++++++++++++--------------------------
5 files changed, 164 insertions(+), 223 deletions(-)
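The bulk helpers (_gcry_aes_cfb_enc and friends) now start from
burn_depth = 0 and pick up the callee's requirement from the return
value, adding their own pointer overhead only when a burn is needed
at all. A condensed view of that caller-side pattern as it appears in
the hunks below (not compilable on its own):

  unsigned int burn_depth = 0;

  for ( ; nblocks; nblocks--)
    {
      burn_depth = ctx->encrypt_fn (ctx, iv, iv);  /* per-block depth */
      /* ... XOR the key stream with the input, advance pointers ... */
    }

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof (void *));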
diff --git a/cipher/rijndael-aesni.c b/cipher/rijndael-aesni.c
index 15ed4ad..e6c1051 100644
--- a/cipher/rijndael-aesni.c
+++ b/cipher/rijndael-aesni.c
@@ -1021,13 +1021,14 @@ do_aesni_ctr_4 (const RIJNDAEL_context *ctx,
}
-void
-_gcry_aes_aesni_encrypt (RIJNDAEL_context *ctx, unsigned char *dst,
+unsigned int
+_gcry_aes_aesni_encrypt (const RIJNDAEL_context *ctx, unsigned char *dst,
const unsigned char *src)
{
aesni_prepare ();
do_aesni_enc (ctx, dst, src);
aesni_cleanup ();
+ return 0;
}
@@ -1128,13 +1129,14 @@ _gcry_aes_aesni_ctr_enc (RIJNDAEL_context *ctx, unsigned char *outbuf,
}
-void
-_gcry_aes_aesni_decrypt (RIJNDAEL_context *ctx, unsigned char *dst,
+unsigned int
+_gcry_aes_aesni_decrypt (const RIJNDAEL_context *ctx, unsigned char *dst,
const unsigned char *src)
{
aesni_prepare ();
do_aesni_dec (ctx, dst, src);
aesni_cleanup ();
+ return 0;
}
diff --git a/cipher/rijndael-amd64.S b/cipher/rijndael-amd64.S
index 16a1ffd..90e804b 100644
--- a/cipher/rijndael-amd64.S
+++ b/cipher/rijndael-amd64.S
@@ -212,6 +212,7 @@ _gcry_aes_amd64_encrypt_block:
movq (2 * 8)(%rsp), %rbp;
addq $(5 * 8), %rsp;
+ movl $(6 * 8), %eax;
ret;
.align 4
@@ -334,6 +335,7 @@ _gcry_aes_amd64_decrypt_block:
movq (2 * 8)(%rsp), %rbp;
addq $(5 * 8), %rsp;
+ movl $(6 * 8), %eax;
ret;
.align 4
diff --git a/cipher/rijndael-arm.S b/cipher/rijndael-arm.S
index 6004ce8..c385e67 100644
--- a/cipher/rijndael-arm.S
+++ b/cipher/rijndael-arm.S
@@ -314,6 +314,8 @@ _gcry_aes_arm_encrypt_block:
/* write output block */
stm RT0, {RA, RB, RC, RD};
2:
+
+ mov r0, #(10 * 4);
pop {%r4-%r11, %ip, %pc};
.ltorg
@@ -565,6 +567,7 @@ _gcry_aes_arm_decrypt_block:
/* write output block */
stm RT0, {RA, RB, RC, RD};
2:
+ mov r0, #(10 * 4);
pop {%r4-%r11, %ip, %pc};
.ltorg
diff --git a/cipher/rijndael-internal.h b/cipher/rijndael-internal.h
index 762ea76..9898f0c 100644
--- a/cipher/rijndael-internal.h
+++ b/cipher/rijndael-internal.h
@@ -74,9 +74,14 @@
# endif
#endif /* ENABLE_AESNI_SUPPORT */
+struct RIJNDAEL_context_s;
+
+typedef unsigned int (*rijndael_cryptfn_t)(const struct RIJNDAEL_context_s *ctx,
+ unsigned char *bx,
+ const unsigned char *ax);
/* Our context object. */
-typedef struct
+typedef struct RIJNDAEL_context_s
{
/* The first fields are the keyschedule arrays. This is so that
they are aligned on a 16 byte boundary if using gcc. This
@@ -100,7 +105,7 @@ typedef struct
PROPERLY_ALIGNED_TYPE dummy;
byte keyschedule[MAXROUNDS+1][4][4];
} u2;
- int rounds; /* Key-length-dependent number of rounds. */
+ int rounds; /* Key-length-dependent number of rounds. */
unsigned int decryption_prepared:1; /* The decryption key schedule is available. */
#ifdef USE_PADLOCK
unsigned int use_padlock:1; /* Padlock shall be used. */
@@ -108,6 +113,8 @@ typedef struct
#ifdef USE_AESNI
unsigned int use_aesni:1; /* AES-NI shall be used. */
#endif /*USE_AESNI*/
+ rijndael_cryptfn_t encrypt_fn;
+ rijndael_cryptfn_t decrypt_fn;
} RIJNDAEL_context ATTR_ALIGNED_16;
/* Macros defining alias for the keyschedules. */
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 8a76fad..f45171a 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -60,15 +60,15 @@ typedef u32 u32_a_t;
#ifdef USE_AMD64_ASM
/* AMD64 assembly implementations of AES */
-extern void _gcry_aes_amd64_encrypt_block(const void *keysched_enc,
- unsigned char *out,
- const unsigned char *in,
- int rounds);
-
-extern void _gcry_aes_amd64_decrypt_block(const void *keysched_dec,
- unsigned char *out,
- const unsigned char *in,
- int rounds);
+extern unsigned int _gcry_aes_amd64_encrypt_block(const void *keysched_enc,
+ unsigned char *out,
+ const unsigned char *in,
+ int rounds);
+
+extern unsigned int _gcry_aes_amd64_decrypt_block(const void *keysched_dec,
+ unsigned char *out,
+ const unsigned char *in,
+ int rounds);
#endif /*USE_AMD64_ASM*/
#ifdef USE_AESNI
@@ -76,10 +76,12 @@ extern void _gcry_aes_amd64_decrypt_block(const void *keysched_dec,
extern void _gcry_aes_aesni_do_setkey(RIJNDAEL_context *ctx, const byte *key);
extern void _gcry_aes_aesni_prepare_decryption(RIJNDAEL_context *ctx);
-extern void _gcry_aes_aesni_encrypt (RIJNDAEL_context *ctx, unsigned char *dst,
- const unsigned char *src);
-extern void _gcry_aes_aesni_decrypt (RIJNDAEL_context *ctx, unsigned char *dst,
- const unsigned char *src);
+extern unsigned int _gcry_aes_aesni_encrypt (const RIJNDAEL_context *ctx,
+ unsigned char *dst,
+ const unsigned char *src);
+extern unsigned int _gcry_aes_aesni_decrypt (const RIJNDAEL_context *ctx,
+ unsigned char *dst,
+ const unsigned char *src);
extern void _gcry_aes_aesni_cfb_enc (RIJNDAEL_context *ctx,
unsigned char *outbuf,
const unsigned char *inbuf,
@@ -103,19 +105,33 @@ extern void _gcry_aes_aesni_cbc_dec (RIJNDAEL_context *ctx,
unsigned char *iv, size_t nblocks);
#endif
+#ifdef USE_PADLOCK
+static unsigned int do_padlock_encrypt (const RIJNDAEL_context *ctx,
+ unsigned char *bx,
+ const unsigned char *ax);
+static unsigned int do_padlock_decrypt (const RIJNDAEL_context *ctx,
+ unsigned char *bx,
+ const unsigned char *ax);
+#endif
+
#ifdef USE_ARM_ASM
/* ARM assembly implementations of AES */
-extern void _gcry_aes_arm_encrypt_block(const void *keysched_enc,
- unsigned char *out,
- const unsigned char *in,
- int rounds);
-
-extern void _gcry_aes_arm_decrypt_block(const void *keysched_dec,
- unsigned char *out,
- const unsigned char *in,
- int rounds);
+extern unsigned int _gcry_aes_arm_encrypt_block(const void *keysched_enc,
+ unsigned char *out,
+ const unsigned char *in,
+ int rounds);
+
+extern unsigned int _gcry_aes_arm_decrypt_block(const void *keysched_dec,
+ unsigned char *out,
+ const unsigned char *in,
+ int rounds);
#endif /*USE_ARM_ASM*/
+static unsigned int do_encrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
+ const unsigned char *ax);
+static unsigned int do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
+ const unsigned char *ax);
+
/* All the numbers. */
@@ -159,6 +175,26 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
if (selftest_failed)
return GPG_ERR_SELFTEST_FAILED;
+ if( keylen == 128/8 )
+ {
+ rounds = 10;
+ KC = 4;
+ }
+ else if ( keylen == 192/8 )
+ {
+ rounds = 12;
+ KC = 6;
+ }
+ else if ( keylen == 256/8 )
+ {
+ rounds = 14;
+ KC = 8;
+ }
+ else
+ return GPG_ERR_INV_KEYLEN;
+
+ ctx->rounds = rounds;
+
#if defined(USE_AESNI) || defined(USE_PADLOCK)
hwfeatures = _gcry_get_hw_features ();
#endif
@@ -171,65 +207,32 @@ do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
ctx->use_aesni = 0;
#endif
- if( keylen == 128/8 )
+ if (0)
{
- rounds = 10;
- KC = 4;
-
- if (0)
- {
- ;
- }
+ ;
+ }
#ifdef USE_AESNI
- else if (hwfeatures & HWF_INTEL_AESNI)
- {
- ctx->use_aesni = 1;
- }
+ else if (hwfeatures & HWF_INTEL_AESNI)
+ {
+ ctx->encrypt_fn = _gcry_aes_aesni_encrypt;
+ ctx->decrypt_fn = _gcry_aes_aesni_decrypt;
+ ctx->use_aesni = 1;
+ }
#endif
#ifdef USE_PADLOCK
- else if (hwfeatures & HWF_PADLOCK_AES)
- {
- ctx->use_padlock = 1;
- memcpy (ctx->padlockkey, key, keylen);
- }
-#endif
- }
- else if ( keylen == 192/8 )
+ else if (hwfeatures & HWF_PADLOCK_AES && keylen == 128/8)
{
- rounds = 12;
- KC = 6;
-
- if (0)
- {
- ;
- }
-#ifdef USE_AESNI
- else if (hwfeatures & HWF_INTEL_AESNI)
- {
- ctx->use_aesni = 1;
- }
-#endif
+ ctx->encrypt_fn = do_padlock_encrypt;
+ ctx->decrypt_fn = do_padlock_decrypt;
+ ctx->use_padlock = 1;
+ memcpy (ctx->padlockkey, key, keylen);
}
- else if ( keylen == 256/8 )
- {
- rounds = 14;
- KC = 8;
-
- if (0)
- {
- ;
- }
-#ifdef USE_AESNI
- else if (hwfeatures & HWF_INTEL_AESNI)
- {
- ctx->use_aesni = 1;
- }
#endif
- }
else
- return GPG_ERR_INV_KEYLEN;
-
- ctx->rounds = rounds;
+ {
+ ctx->encrypt_fn = do_encrypt;
+ ctx->decrypt_fn = do_decrypt;
+ }
/* NB: We don't yet support Padlock hardware key generation. */
@@ -404,17 +407,13 @@ prepare_decryption( RIJNDAEL_context *ctx )
}
+#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
/* Encrypt one block. A and B need to be aligned on a 4 byte
boundary. A and B may be the same. */
static void
do_encrypt_aligned (const RIJNDAEL_context *ctx,
unsigned char *b, const unsigned char *a)
{
-#ifdef USE_AMD64_ASM
- _gcry_aes_amd64_encrypt_block(ctx->keyschenc, b, a, ctx->rounds);
-#elif defined(USE_ARM_ASM)
- _gcry_aes_arm_encrypt_block(ctx->keyschenc, b, a, ctx->rounds);
-#else
#define rk (ctx->keyschenc)
int rounds = ctx->rounds;
int r;
@@ -496,15 +495,19 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
*((u32_a_t*)(b+ 8)) ^= *((u32_a_t*)rk[rounds][2]);
*((u32_a_t*)(b+12)) ^= *((u32_a_t*)rk[rounds][3]);
#undef rk
-#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
-static void
+static unsigned int
do_encrypt (const RIJNDAEL_context *ctx,
unsigned char *bx, const unsigned char *ax)
{
-#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
+#ifdef USE_AMD64_ASM
+ return _gcry_aes_amd64_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds);
+#elif defined(USE_ARM_ASM)
+ return _gcry_aes_arm_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds);
+#else
/* BX and AX are not necessary correctly aligned. Thus we might
need to copy them here. We try to align to a 16 bytes. */
if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
@@ -514,30 +517,27 @@ do_encrypt (const RIJNDAEL_context *ctx,
u32 dummy[4];
byte a[16] ATTR_ALIGNED_16;
} a;
- union
- {
- u32 dummy[4];
- byte b[16] ATTR_ALIGNED_16;
- } b;
buf_cpy (a.a, ax, 16);
- do_encrypt_aligned (ctx, b.b, a.a);
- buf_cpy (bx, b.b, 16);
+ do_encrypt_aligned (ctx, a.a, a.a);
+ buf_cpy (bx, a.a, 16);
}
else
-#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
{
do_encrypt_aligned (ctx, bx, ax);
}
+
+ return (56 + 2*sizeof(int));
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
/* Encrypt or decrypt one block using the padlock engine. A and B may
be the same. */
#ifdef USE_PADLOCK
-static void
-do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
- unsigned char *bx, const unsigned char *ax)
+static unsigned int
+do_padlock (const RIJNDAEL_context *ctx, unsigned char *bx,
+ const unsigned char *ax, int decrypt_flag)
{
/* BX and AX are not necessary correctly aligned. Thus we need to
copy them here. */
@@ -583,6 +583,21 @@ do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
memcpy (bx, b, 16);
+ return (48 + 15 /* possible padding for alignment */);
+}
+
+static unsigned int
+do_padlock_encrypt (const RIJNDAEL_context *ctx,
+ unsigned char *bx, const unsigned char *ax)
+{
+ return do_padlock(ctx, bx, ax, 0);
+}
+
+static unsigned int
+do_padlock_decrypt (const RIJNDAEL_context *ctx,
+ unsigned char *bx, const unsigned char *ax)
+{
+ return do_padlock(ctx, bx, ax, 1);
}
#endif /*USE_PADLOCK*/
@@ -591,31 +606,8 @@ static unsigned int
rijndael_encrypt (void *context, byte *b, const byte *a)
{
RIJNDAEL_context *ctx = context;
- unsigned int burn_stack;
-
- if (0)
- ;
-#ifdef USE_AESNI
- else if (ctx->use_aesni)
- {
- _gcry_aes_aesni_encrypt (ctx, b, a);
- burn_stack = 0;
- }
-#endif /*USE_AESNI*/
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- {
- do_padlock (ctx, 0, b, a);
- burn_stack = (48 + 15 /* possible padding for alignment */);
- }
-#endif /*USE_PADLOCK*/
- else
- {
- do_encrypt (ctx, b, a);
- burn_stack = (56 + 2*sizeof(int));
- }
- return burn_stack;
+ return ctx->encrypt_fn (ctx, b, a);
}
@@ -631,7 +623,7 @@ _gcry_aes_cfb_enc (void *context, unsigned char *iv,
RIJNDAEL_context *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
- unsigned int burn_depth = 48 + 2*sizeof(int);
+ unsigned int burn_depth = 0;
if (0)
;
@@ -642,27 +634,12 @@ _gcry_aes_cfb_enc (void *context, unsigned char *iv,
burn_depth = 0;
}
#endif /*USE_AESNI*/
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- {
- /* Fixme: Let Padlock do the CFBing. */
- for ( ;nblocks; nblocks-- )
- {
- /* Encrypt the IV. */
- do_padlock (ctx, 0, iv, iv);
- /* XOR the input with the IV and store input into IV. */
- buf_xor_2dst(outbuf, iv, inbuf, BLOCKSIZE);
- outbuf += BLOCKSIZE;
- inbuf += BLOCKSIZE;
- }
- }
-#endif /*USE_PADLOCK*/
else
{
for ( ;nblocks; nblocks-- )
{
/* Encrypt the IV. */
- do_encrypt_aligned (ctx, iv, iv);
+ burn_depth = ctx->encrypt_fn (ctx, iv, iv);
/* XOR the input with the IV and store input into IV. */
buf_xor_2dst(outbuf, iv, inbuf, BLOCKSIZE);
outbuf += BLOCKSIZE;
@@ -671,7 +648,7 @@ _gcry_aes_cfb_enc (void *context, unsigned char *iv,
}
if (burn_depth)
- _gcry_burn_stack (burn_depth);
+ _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}
@@ -688,7 +665,7 @@ _gcry_aes_cbc_enc (void *context, unsigned char *iv,
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
unsigned char *last_iv;
- unsigned int burn_depth = 48 + 2*sizeof(int);
+ unsigned int burn_depth = 0;
if (0)
;
@@ -707,14 +684,7 @@ _gcry_aes_cbc_enc (void *context, unsigned char *iv,
{
buf_xor(outbuf, inbuf, last_iv, BLOCKSIZE);
- if (0)
- ;
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- do_padlock (ctx, 0, outbuf, outbuf);
-#endif /*USE_PADLOCK*/
- else
- do_encrypt (ctx, outbuf, outbuf );
+ burn_depth = ctx->encrypt_fn (ctx, outbuf, outbuf);
last_iv = outbuf;
inbuf += BLOCKSIZE;
@@ -727,7 +697,7 @@ _gcry_aes_cbc_enc (void *context, unsigned char *iv,
}
if (burn_depth)
- _gcry_burn_stack (burn_depth);
+ _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}
@@ -744,7 +714,7 @@ _gcry_aes_ctr_enc (void *context, unsigned char *ctr,
RIJNDAEL_context *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
- unsigned int burn_depth = 48 + 2*sizeof(int);
+ unsigned int burn_depth = 0;
int i;
if (0)
@@ -758,12 +728,12 @@ _gcry_aes_ctr_enc (void *context, unsigned char *ctr,
#endif /*USE_AESNI*/
else
{
- union { unsigned char x1[16]; u32 x32[4]; } tmp;
+ union { unsigned char x1[16] ATTR_ALIGNED_16; u32 x32[4]; } tmp;
for ( ;nblocks; nblocks-- )
{
/* Encrypt the counter. */
- do_encrypt_aligned (ctx, tmp.x1, ctr);
+ burn_depth = ctx->encrypt_fn (ctx, tmp.x1, ctr);
/* XOR the input with the encrypted counter and store in output. */
buf_xor(outbuf, tmp.x1, inbuf, BLOCKSIZE);
outbuf += BLOCKSIZE;
@@ -776,26 +746,24 @@ _gcry_aes_ctr_enc (void *context, unsigned char *ctr,
break;
}
}
+
+ wipememory(&tmp, sizeof(tmp));
}
if (burn_depth)
- _gcry_burn_stack (burn_depth);
+ _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}
+#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
/* Decrypt one block. A and B need to be aligned on a 4 byte boundary
and the decryption must have been prepared. A and B may be the
same. */
static void
-do_decrypt_aligned (RIJNDAEL_context *ctx,
+do_decrypt_aligned (const RIJNDAEL_context *ctx,
unsigned char *b, const unsigned char *a)
{
-#ifdef USE_AMD64_ASM
- _gcry_aes_amd64_decrypt_block(ctx->keyschdec, b, a, ctx->rounds);
-#elif defined(USE_ARM_ASM)
- _gcry_aes_arm_decrypt_block(ctx->keyschdec, b, a, ctx->rounds);
-#else
#define rk (ctx->keyschdec)
int rounds = ctx->rounds;
int r;
@@ -878,15 +846,20 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
*((u32_a_t*)(b+ 8)) ^= *((u32_a_t*)rk[0][2]);
*((u32_a_t*)(b+12)) ^= *((u32_a_t*)rk[0][3]);
#undef rk
-#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
/* Decrypt one block. AX and BX may be the same. */
-static void
-do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
+static unsigned int
+do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
+ const unsigned char *ax)
{
-#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
+#ifdef USE_AMD64_ASM
+ return _gcry_aes_amd64_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds);
+#elif defined(USE_ARM_ASM)
+ return _gcry_aes_arm_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds);
+#else
/* BX and AX are not necessary correctly aligned. Thus we might
need to copy them here. We try to align to a 16 bytes. */
if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
@@ -896,21 +869,18 @@ do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
u32 dummy[4];
byte a[16] ATTR_ALIGNED_16;
} a;
- union
- {
- u32 dummy[4];
- byte b[16] ATTR_ALIGNED_16;
- } b;
buf_cpy (a.a, ax, 16);
- do_decrypt_aligned (ctx, b.b, a.a);
- buf_cpy (bx, b.b, 16);
+ do_decrypt_aligned (ctx, a.a, a.a);
+ buf_cpy (bx, a.a, 16);
}
else
-#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
{
do_decrypt_aligned (ctx, bx, ax);
}
+
+ return (56+2*sizeof(int));
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
@@ -929,33 +899,10 @@ static unsigned int
rijndael_decrypt (void *context, byte *b, const byte *a)
{
RIJNDAEL_context *ctx = context;
- unsigned int burn_stack;
check_decryption_preparation (ctx);
- if (0)
- ;
-#ifdef USE_AESNI
- else if (ctx->use_aesni)
- {
- _gcry_aes_aesni_decrypt (ctx, b, a);
- burn_stack = 0;
- }
-#endif /*USE_AESNI*/
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- {
- do_padlock (ctx, 1, b, a);
- burn_stack = (48 + 2*sizeof(int) /* FIXME */);
- }
-#endif /*USE_PADLOCK*/
- else
- {
- do_decrypt (ctx, b, a);
- burn_stack = (56+2*sizeof(int));
- }
-
- return burn_stack;
+ return ctx->decrypt_fn (ctx, b, a);
}
@@ -971,7 +918,7 @@ _gcry_aes_cfb_dec (void *context, unsigned char *iv,
RIJNDAEL_context *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
- unsigned int burn_depth = 48 + 2*sizeof(int);
+ unsigned int burn_depth = 0;
if (0)
;
@@ -982,24 +929,11 @@ _gcry_aes_cfb_dec (void *context, unsigned char *iv,
burn_depth = 0;
}
#endif /*USE_AESNI*/
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- {
- /* Fixme: Let Padlock do the CFBing. */
- for ( ;nblocks; nblocks-- )
- {
- do_padlock (ctx, 0, iv, iv);
- buf_xor_n_copy(outbuf, iv, inbuf, BLOCKSIZE);
- outbuf += BLOCKSIZE;
- inbuf += BLOCKSIZE;
- }
- }
-#endif /*USE_PADLOCK*/
else
{
for ( ;nblocks; nblocks-- )
{
- do_encrypt_aligned (ctx, iv, iv);
+ burn_depth = ctx->encrypt_fn (ctx, iv, iv);
buf_xor_n_copy(outbuf, iv, inbuf, BLOCKSIZE);
outbuf += BLOCKSIZE;
inbuf += BLOCKSIZE;
@@ -1007,7 +941,7 @@ _gcry_aes_cfb_dec (void *context, unsigned char *iv,
}
if (burn_depth)
- _gcry_burn_stack (burn_depth);
+ _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}
@@ -1023,7 +957,7 @@ _gcry_aes_cbc_dec (void *context, unsigned char *iv,
RIJNDAEL_context *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
- unsigned int burn_depth = 48 + 2*sizeof(int) + 4*sizeof (char*);
+ unsigned int burn_depth = 0;
check_decryption_preparation (ctx);
@@ -1038,21 +972,14 @@ _gcry_aes_cbc_dec (void *context, unsigned char *iv,
#endif /*USE_AESNI*/
else
{
- unsigned char savebuf[BLOCKSIZE];
+ unsigned char savebuf[BLOCKSIZE] ATTR_ALIGNED_16;
for ( ;nblocks; nblocks-- )
{
/* INBUF is needed later and it may be identical to OUTBUF, so store
the intermediate result to SAVEBUF. */
- if (0)
- ;
-#ifdef USE_PADLOCK
- else if (ctx->use_padlock)
- do_padlock (ctx, 1, savebuf, inbuf);
-#endif /*USE_PADLOCK*/
- else
- do_decrypt (ctx, savebuf, inbuf);
+ burn_depth = ctx->decrypt_fn (ctx, savebuf, inbuf);
buf_xor_n_copy_2(outbuf, savebuf, iv, inbuf, BLOCKSIZE);
inbuf += BLOCKSIZE;
@@ -1063,7 +990,7 @@ _gcry_aes_cbc_dec (void *context, unsigned char *iv,
}
if (burn_depth)
- _gcry_burn_stack (burn_depth);
+ _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}
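Two side notes on the temporary buffers touched above: the CTR
scratch block is now 16-byte aligned (so ctx->encrypt_fn can be any
of the implementations, including the aligned-only software path) and
is wiped after use. In portable C11 the same idea would look roughly
like this (alignas/memset stand in for ATTR_ALIGNED_16/wipememory;
note that a plain memset can be optimised away, which is exactly why
the patch uses wipememory instead):

  #include <stdalign.h>
  #include <string.h>

  void ctr_scratch_demo (void)
  {
    alignas (16) unsigned char tmp[16];   /* aligned scratch block */

    /* ... encrypt the counter into tmp, XOR against the input ... */

    memset (tmp, 0, sizeof tmp);   /* illustrative wipe only */
  }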