Diffstat (limited to 'scripts/conf-w32brg/cipher')
-rw-r--r--  scripts/conf-w32brg/cipher/aes.h          158
-rw-r--r--  scripts/conf-w32brg/cipher/aescrypt.asm   404
-rw-r--r--  scripts/conf-w32brg/cipher/aescrypt.c     311
-rw-r--r--  scripts/conf-w32brg/cipher/aeskey.c       463
-rw-r--r--  scripts/conf-w32brg/cipher/aesopt.h      1042
-rw-r--r--  scripts/conf-w32brg/cipher/aestab.c       232
-rw-r--r--  scripts/conf-w32brg/cipher/rijndael2.c    279
7 files changed, 2889 insertions, 0 deletions
diff --git a/scripts/conf-w32brg/cipher/aes.h b/scripts/conf-w32brg/cipher/aes.h
new file mode 100644
index 000000000..1bec72059
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aes.h
@@ -0,0 +1,158 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman <[email protected]>, Worcester, UK.
+ All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 1/06/2003
+
+ This file contains the definitions required to use AES in C. See aesopt.h
+ for optimisation details.
+*/
+
+#ifndef _AES_H
+#define _AES_H
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#define AES_128 /* define if AES with 128 bit keys is needed */
+#define AES_192 /* define if AES with 192 bit keys is needed */
+#define AES_256 /* define if AES with 256 bit keys is needed */
+#define AES_VAR /* define if a variable key size is needed */
+
+/* The following must also be set in assembler files if being used */
+
+#define AES_ENCRYPT /* if support for encryption is needed */
+#define AES_DECRYPT /* if support for decryption is needed */
+#define AES_ERR_CHK /* for parameter checks & error return codes */
+
+/* This include is used to find 8 & 32 bit unsigned integer types */
+#include <limits.h>
+
+#if UCHAR_MAX == 0xff /* an unsigned 8 bit type */
+ typedef unsigned char aes_08t;
+#else
+#error Please define aes_08t as an 8-bit unsigned integer type in aes.h
+#endif
+
+#if UINT_MAX == 0xffffffff /* an unsigned 32 bit type */
+ typedef unsigned int aes_32t;
+#elif ULONG_MAX == 0xffffffff
+ typedef unsigned long aes_32t;
+#else
+#error Please define aes_32t as a 32-bit unsigned integer type in aes.h
+#endif
+
+#define AES_BLOCK_SIZE 16 /* the AES block size in bytes */
+#define N_COLS 4 /* the number of columns in the state */
+
+/* a maximum of 60 32-bit words are needed for the key schedule but */
+/* 64 are claimed to allow space at the top for a CBC xor buffer. */
+/* If this is not needed, this value can be reduced to 60. A value */
+/* of 64 may also help in maintaining alignment in some situations */
+#define KS_LENGTH 64
+
+#ifdef AES_ERR_CHK
+#define aes_ret int
+#define aes_good 0
+#define aes_error -1
+#else
+#define aes_ret void
+#endif
+
+#ifndef AES_DLL /* implement normal/DLL functions */
+#define aes_rval aes_ret
+#else
+#define aes_rval aes_ret __declspec(dllexport) _stdcall
+#endif
+
+/* This routine must be called before first use if non-static */
+/* tables are being used */
+
+void gen_tabs(void);
+
+/* The key length (klen) is input in bytes when it is in the range */
+/* 16 <= klen <= 32 or in bits when in the range 128 <= klen <= 256 */
+
+#ifdef AES_ENCRYPT
+
+typedef struct
+{ aes_32t ks[KS_LENGTH];
+} aes_encrypt_ctx;
+
+#if defined(AES_128) || defined(AES_VAR)
+aes_rval aes_encrypt_key128(const void *in_key, aes_encrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_192) || defined(AES_VAR)
+aes_rval aes_encrypt_key192(const void *in_key, aes_encrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_256) || defined(AES_VAR)
+aes_rval aes_encrypt_key256(const void *in_key, aes_encrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_VAR)
+aes_rval aes_encrypt_key(const void *in_key, int key_len, aes_encrypt_ctx cx[1]);
+#endif
+
+aes_rval aes_encrypt(const void *in_blk, void *out_blk, const aes_encrypt_ctx cx[1]);
+#endif
+
+#ifdef AES_DECRYPT
+
+typedef struct
+{ aes_32t ks[KS_LENGTH];
+} aes_decrypt_ctx;
+
+#if defined(AES_128) || defined(AES_VAR)
+aes_rval aes_decrypt_key128(const void *in_key, aes_decrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_192) || defined(AES_VAR)
+aes_rval aes_decrypt_key192(const void *in_key, aes_decrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_256) || defined(AES_VAR)
+aes_rval aes_decrypt_key256(const void *in_key, aes_decrypt_ctx cx[1]);
+#endif
+
+#if defined(AES_VAR)
+aes_rval aes_decrypt_key(const void *in_key, int key_len, aes_decrypt_ctx cx[1]);
+#endif
+
+aes_rval aes_decrypt(const void *in_blk, void *out_blk, const aes_decrypt_ctx cx[1]);
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
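
For orientation, here is a minimal usage sketch of the interface declared in aes.h above. The wrapper name encrypt_one_block is hypothetical, and the sketch assumes the header's default settings (AES_ERR_CHK defined, so aes_good and aes_error are available); gen_tabs() only needs to be called when the library is built with dynamic rather than fixed tables.

    #include "aes.h"

    /* Sketch: key one encryption context with a 128-bit key and encrypt a
       single 16-byte block. Returns 0 on success, -1 on error.            */
    static int encrypt_one_block(const unsigned char key[16],
                                 const unsigned char in[16],
                                 unsigned char out[16])
    {   aes_encrypt_ctx ectx[1];

        /* gen_tabs();  -- call once before first use if the build uses
                           dynamic rather than fixed tables */
        if(aes_encrypt_key128(key, ectx) != aes_good)
            return -1;
        if(aes_encrypt(in, out, ectx) != aes_good)
            return -1;
        return 0;
    }
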
diff --git a/scripts/conf-w32brg/cipher/aescrypt.asm b/scripts/conf-w32brg/cipher/aescrypt.asm
new file mode 100644
index 000000000..90beaaabf
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aescrypt.asm
@@ -0,0 +1,404 @@
+
+; ---------------------------------------------------------------------------
+; Copyright (c) 2002, Dr Brian Gladman <[email protected]>, Worcester, UK.
+; All rights reserved.
+;
+; LICENSE TERMS
+;
+; The free distribution and use of this software in both source and binary
+; form is allowed (with or without changes) provided that:
+;
+; 1. distributions of this source code include the above copyright
+; notice, this list of conditions and the following disclaimer;
+;
+; 2. distributions in binary form include the above copyright
+; notice, this list of conditions and the following disclaimer
+; in the documentation and/or other associated materials;
+;
+; 3. the copyright holder's name is not used to endorse products
+; built using this software without specific written permission.
+;
+; ALTERNATIVELY, provided that this notice is retained in full, this product
+; may be distributed under the terms of the GNU General Public License (GPL),
+; in which case the provisions of the GPL apply INSTEAD OF those given above.
+;
+; DISCLAIMER
+;
+; This software is provided 'as is' with no explicit or implied warranties
+; in respect of its properties, including, but not limited to, correctness
+; and/or fitness for purpose.
+; ---------------------------------------------------------------------------
+; Issue Date: 1/06/2003
+
+; An AES implementation for Pentium processors using the NASM assembler (see
+; <http://sourceforge.net/projects/nasm>). This version provides the standard
+; AES block length (128 bits, 16 bytes) with the same interface as that used
+; in my C implementation. The eax, ecx and edx registers and the arithmetic
+; status flags are not preserved. The ebx, esi, edi, and ebp registers are
+; preserved across calls. Only encryption and decryption are provided here,
+; the key scheduling code being that in aeskey.c compiled with USE_ASM
+; defined. This code uses the VC++ register saving conventions; if it is used
+; with another compiler, its conventions for using and saving registers (and
+; its calling conventions) will need to be checked. The NASM command line for
+; the VC++ custom build step is:
+;
+; nasm -O2 -f win32 -o "$(TargetDir)\$(InputName).obj" "$(InputPath)"
+
+ section .text ; use32
+
+; aes_rval aes_encrypt(const unsigned char in_blk[],
+; unsigned char out_blk[], const aes_encrypt_ctx cx[1]);
+; aes_rval aes_decrypt(const unsigned char in_blk[],
+; unsigned char out_blk[], const aes_decrypt_ctx cx[1]);
+;
+; comment in/out the following lines to obtain the desired subroutines
+
+%define ENCRYPTION ; define if encryption is needed
+%define DECRYPTION ; define if decryption is needed
+
+; The DLL interface must use the _stdcall convention in which the number
+; of bytes of parameter space is added after an @ to the subroutine's name.
+; We must also remove our parameters from the stack before return (see
+; the do_ret macro). Define AES_DLL for the Dynamic Link Library version.
+
+;%define AES_DLL
+
+tlen: equ 1024 ; length of each of 4 'xor' arrays (256 32-bit words)
+
+; offsets to parameters with one register pushed onto stack
+
+in_blk: equ 4 ; input byte array address parameter
+out_blk:equ 8 ; output byte array address parameter
+ctx: equ 12 ; AES context structure
+stk_spc:equ 24 ; stack space
+
+; register mapping for encrypt and decrypt subroutines
+
+%define r0 eax
+%define r1 ebx
+%define r2 esi
+%define r3 edi
+%define r4 ecx
+%define r5 edx
+%define r6 ebp
+
+%define eaxl al
+%define eaxh ah
+%define ebxl bl
+%define ebxh bh
+%define ecxl cl
+%define ecxh ch
+%define edxl dl
+%define edxh dh
+
+; These macros take a 32-bit word representing a column and use each
+; of its 4 bytes to index a table of 256 32-bit words which are xored
+; into each of the four output columns. The output values are in the
+; registers %1, %2, %3 and %4 and the column input is in %5 with %6
+; as a scratch register.
+
+; Parameters:
+; %1 out_state[0]
+; %2 out_state[1]
+; %3 out_state[2]
+; %4 out_state[3]
+; %5 input register for the round (destroyed)
+; %6 scratch register for the round
+; %7 key schedule address for round (in form r6 + offset)
+
+%macro do_fcol 8 ; first column forward round
+
+ movzx %6,%5l
+ mov %1,[%8]
+ xor %1,[4*%6+%7]
+ movzx %6,%5h
+ shr %5,16
+ mov %2,[%8+12]
+ xor %2,[4*%6+%7+tlen]
+ movzx %6,%5l
+ mov %3,[%8+ 8]
+ xor %3,[4*%6+%7+2*tlen]
+ movzx %6,%5h
+ mov %5,%4 ; save an input register value
+ mov %4,[%8+ 4]
+ xor %4,[4*%6+%7+3*tlen]
+
+%endmacro
+
+%macro do_icol 8 ; first column for inverse round
+
+ movzx %6,%5l
+ mov %1,[%8]
+ xor %1,[4*%6+%7]
+ movzx %6,%5h
+ shr %5,16
+ mov %2,[%8+ 4]
+ xor %2,[4*%6+%7+tlen]
+ movzx %6,%5l
+ mov %3,[%8+ 8]
+ xor %3,[4*%6+%7+2*tlen]
+ movzx %6,%5h
+ mov %5,%4 ; save an input register value
+ mov %4,[%8+12]
+ xor %4,[4*%6+%7+3*tlen]
+
+%endmacro
+
+%macro do_col 7 ; other columns for forward and inverse rounds
+
+ movzx %6,%5l
+ xor %1,[4*%6+%7]
+ movzx %6,%5h
+ shr %5,16
+ xor %2,[4*%6+%7+tlen]
+ movzx %6,%5l
+ xor %3,[4*%6+%7+2*tlen]
+ movzx %6,%5h
+ xor %4,[4*%6+%7+3*tlen]
+
+%endmacro
+
+; These macros implement stack based local variables
+
+%macro save 2
+ mov [esp+4*%1],%2
+%endmacro
+
+%macro restore 2
+ mov %1,[esp+4*%2]
+%endmacro
+
+; This macro performs a forward encryption cycle. It is entered with
+; the first previous round column values in r0, r1, r2 and r3 and
+; exits with the final values in the same registers.
+
+%macro fwd_rnd 1-2 _t_fn ; normal forward rounds
+
+ mov r4,r0
+ save 0,r2
+ save 1,r3
+
+; compute new column values
+
+ do_fcol r0,r3,r2,r1, r4,r5, %2, %1 ; r4 = input r0
+    do_col  r1,r0,r3,r2, r4,r5, %2             ; r4 = input r1 (saved in do_fcol)
+ restore r4,0
+ do_col r2,r1,r0,r3, r4,r5, %2 ; r4 = input r2
+ restore r4,1
+ do_col r3,r2,r1,r0, r4,r5, %2 ; r4 = input r3
+
+%endmacro
+
+; This macro performs an inverse encryption cycle. It is entered with
+; the first previous round column values in r0, r1, r2 and r3 and
+; exits with the final values in the same registers.
+
+%macro inv_rnd 1-2 _t_in ; normal inverse round
+
+ mov r4,r0
+ save 0,r1
+ save 1,r2
+
+; compute new column values
+
+ do_icol r0,r1,r2,r3, r4,r5, %2, %1 ; r4 = r0
+    do_col  r3,r0,r1,r2, r4,r5, %2             ; r4 = r3 (saved in do_icol)
+ restore r4,1
+ do_col r2,r3,r0,r1, r4,r5, %2 ; r4 = r2
+ restore r4,0
+ do_col r1,r2,r3,r0, r4,r5, %2 ; r4 = r1
+
+%endmacro
+
+; The DLL has to implement the _stdcall calling interface on return.
+; In this case we have to take our parameters (3 4-byte pointers)
+; off the stack.
+
+%macro do_ret 0
+%ifdef AES_DLL
+ ret 12
+%else
+ ret
+%endif
+%endmacro
+
+%macro do_name 1
+%ifndef AES_DLL
+ global %1
+%1:
+%else
+ global %1@12
+ export %1@12
+%1@12:
+%endif
+%endmacro
+
+; AES Encryption Subroutine
+
+%ifdef ENCRYPTION
+
+ extern _t_fn
+ extern _t_fl
+
+ do_name _aes_encrypt
+
+ sub esp,stk_spc
+ mov [esp+20],ebp
+ mov [esp+16],ebx
+ mov [esp+12],esi
+ mov [esp+ 8],edi
+ mov r4,[esp+in_blk+stk_spc] ; input pointer
+ mov r6,[esp+ctx+stk_spc] ; key pointer
+
+; input four columns and xor in first round key
+
+ mov r0,[r4 ]
+ mov r1,[r4+ 4]
+ xor r0,[r6 ]
+ xor r1,[r6+ 4]
+ mov r2,[r4+ 8]
+ mov r3,[r4+12]
+ xor r2,[r6+ 8]
+ xor r3,[r6+12]
+
+; determine the number of rounds
+
+ mov r4,[r6+4*45]
+ mov r5,[r6+4*52]
+ xor r4,[r6+4*53]
+ xor r4,r5
+ je .1
+ cmp r5,10
+ je .3
+ cmp r5,12
+ je .2
+ mov ebp,[esp+20]
+ mov ebx,[esp+16]
+ mov esi,[esp+12]
+ mov edi,[esp+ 8]
+ lea esp,[esp+stk_spc]
+ mov eax,-1
+ do_ret
+
+.1: fwd_rnd r6+ 16 ; 14 rounds for 256-bit key
+ fwd_rnd r6+ 32
+ lea r6,[r6+32]
+.2: fwd_rnd r6+ 16 ; 12 rounds for 192-bit key
+ fwd_rnd r6+ 32
+ lea r6,[r6+32]
+.3: fwd_rnd r6+ 16 ; 10 rounds for 128-bit key
+ fwd_rnd r6+ 32
+ fwd_rnd r6+ 48
+ fwd_rnd r6+ 64
+ fwd_rnd r6+ 80
+ fwd_rnd r6+ 96
+ fwd_rnd r6+112
+ fwd_rnd r6+128
+ fwd_rnd r6+144
+ fwd_rnd r6+160, _t_fl ; last round uses a different table
+
+; move final values to the output array
+
+ mov r6,[esp+out_blk+stk_spc]
+ mov [r6+12],r3
+ mov [r6+8],r2
+ mov [r6+4],r1
+ mov [r6],r0
+ mov ebp,[esp+20]
+ mov ebx,[esp+16]
+ mov esi,[esp+12]
+ mov edi,[esp+ 8]
+ lea esp,[esp+stk_spc]
+ xor eax,eax
+ do_ret
+
+%endif
+
+; AES Decryption Subroutine
+
+%ifdef DECRYPTION
+
+ extern _t_in
+ extern _t_il
+
+ do_name _aes_decrypt
+
+ sub esp,stk_spc
+ mov [esp+20],ebp
+ mov [esp+16],ebx
+ mov [esp+12],esi
+ mov [esp+ 8],edi
+ mov r4,[esp+in_blk+stk_spc] ; input pointer
+ mov r6,[esp+ctx+stk_spc] ; context pointer
+
+; input four columns
+
+ mov r0,[r4]
+ mov r1,[r4+4]
+ mov r2,[r4+8]
+ mov r3,[r4+12]
+
+; determine the number of rounds
+
+ mov r5,[r6+4*52]
+ mov r4,[r6+4*45]
+ xor r4,[r6+4*53]
+ xor r4,r5
+ jne .1
+ mov r5,14
+
+; xor in initial keys
+
+.1: lea r4,[4*r5]
+ xor r0,[r6+4*r4 ]
+ xor r1,[r6+4*r4+ 4]
+ xor r2,[r6+4*r4+ 8]
+ xor r3,[r6+4*r4+12]
+ cmp r5,10
+ je .3
+ cmp r5,12
+ je .2
+ cmp r5,14
+ jne .4
+
+ inv_rnd r6+208 ; 14 rounds for 256-bit key
+ inv_rnd r6+192
+.2: inv_rnd r6+176 ; 12 rounds for 192-bit key
+ inv_rnd r6+160
+.3: inv_rnd r6+144 ; 10 rounds for 128-bit key
+ inv_rnd r6+128
+ inv_rnd r6+112
+ inv_rnd r6+ 96
+ inv_rnd r6+ 80
+ inv_rnd r6+ 64
+ inv_rnd r6+ 48
+ inv_rnd r6+ 32
+ inv_rnd r6+ 16
+ inv_rnd r6, _t_il ; last round uses a different table
+
+; move final values to the output array.
+
+ mov r6,[esp+out_blk+stk_spc]
+ mov [r6+12],r3
+ mov [r6+8],r2
+ mov [r6+4],r1
+ mov [r6],r0
+ mov ebp,[esp+20]
+ mov ebx,[esp+16]
+ mov esi,[esp+12]
+ mov edi,[esp+ 8]
+ lea esp,[esp+stk_spc]
+ xor eax,eax
+ do_ret
+
+.4: mov ebp,[esp+20]
+ mov ebx,[esp+16]
+ mov esi,[esp+12]
+ mov edi,[esp+ 8]
+ lea esp,[esp+stk_spc]
+ mov eax,-1
+ do_ret
+
+%endif
+
+ end
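
The 'determine the number of rounds' blocks above read three words from the top of the key schedule rather than taking a round count as a parameter. A plain C restatement of that test (a sketch mirroring the expression used in aescrypt.c and the values stored by aeskey.c) is:

    #include "aes.h"

    /* Sketch: for 128- and 192-bit keys the key schedule code stores the
       round count (10 or 12) in ks[52] and arranges ks[45]^ks[52]^ks[53]
       to be non-zero; for a 256-bit key that xor is zero, so 14 rounds
       are implied, which is why the assembler defaults to 14 on a zero
       result.                                                            */
    static aes_32t rounds_from_schedule(const aes_32t ks[KS_LENGTH])
    {
        return (ks[45] ^ ks[52] ^ ks[53]) ? ks[52] : 14;
    }
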
diff --git a/scripts/conf-w32brg/cipher/aescrypt.c b/scripts/conf-w32brg/cipher/aescrypt.c
new file mode 100644
index 000000000..944b0890a
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aescrypt.c
@@ -0,0 +1,311 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman <[email protected]>, Worcester, UK.
+ All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 1/06/2003
+
+ This file contains the code for implementing encryption and decryption
+ for AES (Rijndael) with its standard 16-byte block size and key sizes of
+ 16, 24 and 32 bytes. It can optionally be replaced by code written in
+ assembler using NASM. For further details see the file aesopt.h.
+*/
+
+#include "aesopt.h"
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#define si(y,x,k,c) (s(y,c) = word_in(x, c) ^ (k)[c])
+#define so(y,x,c) word_out(y, c, s(x,c))
+
+#if defined(ARRAYS)
+#define locals(y,x) x[4],y[4]
+#else
+#define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3
+#endif
+
+#define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \
+ s(y,2) = s(x,2); s(y,3) = s(x,3);
+#define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3)
+#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
+#define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3)
+
+#if defined(ENCRYPTION) && !defined(AES_ASM)
+
+/* Visual C++ .Net v7.1 provides the fastest encryption code when using
+   Pentium optimisation with small code, but this is poor for decryption,
+ so we need to control this with the following VC++ pragmas
+*/
+
+#if defined(_MSC_VER)
+#pragma optimize( "s", on )
+#endif
+
+/* Given the column (c) of the output state variable, the following
+ macros give the input state variables which are needed in its
+ computation for each row (r) of the state. All the alternative
+ macros give the same end values but expand into different ways
+ of calculating these values. In particular the complex macro
+ used for dynamically variable block sizes is designed to expand
+ to a compile time constant whenever possible but will expand to
+ conditional clauses on some branches (I am grateful to Frank
+ Yellin for this construction)
+*/
+
+#define fwd_var(x,r,c)\
+ ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
+ : r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
+ : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
+ : ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))
+
+#if defined(FT4_SET)
+#undef dec_fmvars
+#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
+#elif defined(FT1_SET)
+#undef dec_fmvars
+#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(f,n),fwd_var,rf1,c))
+#else
+#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ fwd_mcol(no_table(x,t_use(s,box),fwd_var,rf1,c)))
+#endif
+
+#if defined(FL4_SET)
+#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,l),fwd_var,rf1,c))
+#elif defined(FL1_SET)
+#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(f,l),fwd_var,rf1,c))
+#else
+#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(s,box),fwd_var,rf1,c))
+#endif
+
+aes_rval aes_encrypt(const void *in_blk, void *out_blk, const aes_encrypt_ctx cx[1])
+{ aes_32t locals(b0, b1);
+ const aes_32t *kp = cx->ks;
+#ifdef dec_fmvars
+ dec_fmvars; /* declare variables for fwd_mcol() if needed */
+#endif
+
+ aes_32t nr = (kp[45] ^ kp[52] ^ kp[53] ? kp[52] : 14);
+
+#ifdef AES_ERR_CHK
+ if( (nr != 10 || !(kp[0] | kp[3] | kp[4]))
+ && (nr != 12 || !(kp[0] | kp[5] | kp[6]))
+ && (nr != 14 || !(kp[0] | kp[7] | kp[8])) )
+ return aes_error;
+#endif
+
+ state_in(b0, in_blk, kp);
+
+#if (ENC_UNROLL == FULL)
+
+ switch(nr)
+ {
+ case 14:
+ round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
+ kp += 2 * N_COLS;
+ case 12:
+ round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
+ kp += 2 * N_COLS;
+ case 10:
+ round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 3 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 4 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 5 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 6 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 7 * N_COLS);
+ round(fwd_rnd, b0, b1, kp + 8 * N_COLS);
+ round(fwd_rnd, b1, b0, kp + 9 * N_COLS);
+ round(fwd_lrnd, b0, b1, kp +10 * N_COLS);
+ }
+
+#else
+
+#if (ENC_UNROLL == PARTIAL)
+ { aes_32t rnd;
+ for(rnd = 0; rnd < (nr >> 1) - 1; ++rnd)
+ {
+ kp += N_COLS;
+ round(fwd_rnd, b1, b0, kp);
+ kp += N_COLS;
+ round(fwd_rnd, b0, b1, kp);
+ }
+ kp += N_COLS;
+ round(fwd_rnd, b1, b0, kp);
+#else
+ { aes_32t rnd;
+ for(rnd = 0; rnd < nr - 1; ++rnd)
+ {
+ kp += N_COLS;
+ round(fwd_rnd, b1, b0, kp);
+ l_copy(b0, b1);
+ }
+#endif
+ kp += N_COLS;
+ round(fwd_lrnd, b0, b1, kp);
+ }
+#endif
+
+ state_out(out_blk, b0);
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(DECRYPTION) && !defined(AES_ASM)
+
+/* Visual C++ .Net v7.1 provides the fastest encryption code when using
+   Pentium optimisation with small code, but this is poor for decryption,
+ so we need to control this with the following VC++ pragmas
+*/
+
+#if defined(_MSC_VER)
+#pragma optimize( "t", on )
+#endif
+
+/* Given the column (c) of the output state variable, the following
+ macros give the input state variables which are needed in its
+ computation for each row (r) of the state. All the alternative
+ macros give the same end values but expand into different ways
+ of calculating these values. In particular the complex macro
+ used for dynamically variable block sizes is designed to expand
+ to a compile time constant whenever possible but will expand to
+ conditional clauses on some branches (I am grateful to Frank
+ Yellin for this construction)
+*/
+
+#define inv_var(x,r,c)\
+ ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
+ : r == 1 ? ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2))\
+ : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
+ : ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0)))
+
+#if defined(IT4_SET)
+#undef dec_imvars
+#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,n),inv_var,rf1,c))
+#elif defined(IT1_SET)
+#undef dec_imvars
+#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(i,n),inv_var,rf1,c))
+#else
+#define inv_rnd(y,x,k,c) (s(y,c) = inv_mcol((k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c)))
+#endif
+
+#if defined(IL4_SET)
+#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,l),inv_var,rf1,c))
+#elif defined(IL1_SET)
+#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(i,l),inv_var,rf1,c))
+#else
+#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c))
+#endif
+
+aes_rval aes_decrypt(const void *in_blk, void *out_blk, const aes_decrypt_ctx cx[1])
+{ aes_32t locals(b0, b1);
+#ifdef dec_imvars
+ dec_imvars; /* declare variables for inv_mcol() if needed */
+#endif
+
+ aes_32t nr = (cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] ? cx->ks[52] : 14);
+ const aes_32t *kp = cx->ks + nr * N_COLS;
+
+#ifdef AES_ERR_CHK
+ if( (nr != 10 || !(cx->ks[0] | cx->ks[3] | cx->ks[4]))
+ && (nr != 12 || !(cx->ks[0] | cx->ks[5] | cx->ks[6]))
+ && (nr != 14 || !(cx->ks[0] | cx->ks[7] | cx->ks[8])) )
+ return aes_error;
+#endif
+
+ state_in(b0, in_blk, kp);
+
+#if (DEC_UNROLL == FULL)
+
+ switch(nr)
+ {
+ case 14:
+ round(inv_rnd, b1, b0, kp - 1 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 2 * N_COLS);
+ kp -= 2 * N_COLS;
+ case 12:
+ round(inv_rnd, b1, b0, kp - 1 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 2 * N_COLS);
+ kp -= 2 * N_COLS;
+ case 10:
+ round(inv_rnd, b1, b0, kp - 1 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 2 * N_COLS);
+ round(inv_rnd, b1, b0, kp - 3 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 4 * N_COLS);
+ round(inv_rnd, b1, b0, kp - 5 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 6 * N_COLS);
+ round(inv_rnd, b1, b0, kp - 7 * N_COLS);
+ round(inv_rnd, b0, b1, kp - 8 * N_COLS);
+ round(inv_rnd, b1, b0, kp - 9 * N_COLS);
+ round(inv_lrnd, b0, b1, kp - 10 * N_COLS);
+ }
+
+#else
+
+#if (DEC_UNROLL == PARTIAL)
+ { aes_32t rnd;
+ for(rnd = 0; rnd < (nr >> 1) - 1; ++rnd)
+ {
+ kp -= N_COLS;
+ round(inv_rnd, b1, b0, kp);
+ kp -= N_COLS;
+ round(inv_rnd, b0, b1, kp);
+ }
+ kp -= N_COLS;
+ round(inv_rnd, b1, b0, kp);
+#else
+ { aes_32t rnd;
+ for(rnd = 0; rnd < nr - 1; ++rnd)
+ {
+ kp -= N_COLS;
+ round(inv_rnd, b1, b0, kp);
+ l_copy(b0, b1);
+ }
+#endif
+ kp -= N_COLS;
+ round(inv_lrnd, b0, b1, kp);
+ }
+#endif
+
+ state_out(out_blk, b0);
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
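
The si() and state_in() macros near the top of this file pack each 4-byte input column into a 32-bit word and xor in the corresponding word of the first round key. Written out for a single column as a plain function, and assuming the little-endian internal byte order (the packing used by bytes2word in aesopt.h), a sketch looks like:

    #include "aes.h"

    /* Sketch of si(y,x,k,c) for one column c: load four input bytes into a
       word (little-endian packing) and apply AddRoundKey for that column. */
    static aes_32t column_in(const unsigned char *in_blk,
                             const aes_32t *rk, int c)
    {
        aes_32t w = (aes_32t)in_blk[4 * c]
                  | ((aes_32t)in_blk[4 * c + 1] <<  8)
                  | ((aes_32t)in_blk[4 * c + 2] << 16)
                  | ((aes_32t)in_blk[4 * c + 3] << 24);
        return w ^ rk[c];
    }
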
diff --git a/scripts/conf-w32brg/cipher/aeskey.c b/scripts/conf-w32brg/cipher/aeskey.c
new file mode 100644
index 000000000..acc1b8899
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aeskey.c
@@ -0,0 +1,463 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman <[email protected]>, Worcester, UK.
+ All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 1/06/2003
+
+ This file contains the code for implementing the key schedule for AES
+ (Rijndael) with its standard 16-byte block size and key sizes of 16, 24
+ and 32 bytes. See aesopt.h for further details including optimisation.
+*/
+
+#include "aesopt.h"
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+/* Initialise the key schedule from the user supplied key. The key
+ length can be specified in bytes, with legal values of 16, 24
+ and 32, or in bits, with legal values of 128, 192 and 256. These
+ values correspond with Nk values of 4, 6 and 8 respectively.
+
+ The following macros implement a single cycle in the key
+ schedule generation process. The number of cycles needed
+ for each cx->n_col and nk value is:
+
+ nk = 4 5 6 7 8
+ ------------------------------
+ cx->n_col = 4 10 9 8 7 7
+ cx->n_col = 5 14 11 10 9 9
+ cx->n_col = 6 19 15 12 11 11
+ cx->n_col = 7 21 19 16 13 14
+ cx->n_col = 8 29 23 19 17 14
+*/
+
+#define ke4(k,i) \
+{ k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[4*(i)+5] = ss[1] ^= ss[0]; \
+ k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \
+}
+#define kel4(k,i) \
+{ k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[4*(i)+5] = ss[1] ^= ss[0]; \
+ k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \
+}
+
+#define ke6(k,i) \
+{ k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[6*(i)+ 7] = ss[1] ^= ss[0]; \
+ k[6*(i)+ 8] = ss[2] ^= ss[1]; k[6*(i)+ 9] = ss[3] ^= ss[2]; \
+ k[6*(i)+10] = ss[4] ^= ss[3]; k[6*(i)+11] = ss[5] ^= ss[4]; \
+}
+#define kel6(k,i) \
+{ k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[6*(i)+ 7] = ss[1] ^= ss[0]; \
+ k[6*(i)+ 8] = ss[2] ^= ss[1]; k[6*(i)+ 9] = ss[3] ^= ss[2]; \
+}
+
+#define ke8(k,i) \
+{ k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[8*(i)+ 9] = ss[1] ^= ss[0]; \
+ k[8*(i)+10] = ss[2] ^= ss[1]; k[8*(i)+11] = ss[3] ^= ss[2]; \
+ k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); k[8*(i)+13] = ss[5] ^= ss[4]; \
+ k[8*(i)+14] = ss[6] ^= ss[5]; k[8*(i)+15] = ss[7] ^= ss[6]; \
+}
+#define kel8(k,i) \
+{ k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[8*(i)+ 9] = ss[1] ^= ss[0]; \
+ k[8*(i)+10] = ss[2] ^= ss[1]; k[8*(i)+11] = ss[3] ^= ss[2]; \
+}
+
+#if defined(ENCRYPTION_KEY_SCHEDULE)
+
+#if defined(AES_128) || defined(AES_VAR)
+
+aes_rval aes_encrypt_key128(const void *in_key, aes_encrypt_ctx cx[1])
+{ aes_32t ss[4];
+
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+
+#if ENC_UNROLL == NONE
+ { aes_32t i;
+
+ for(i = 0; i < ((11 * N_COLS - 1) / 4); ++i)
+ ke4(cx->ks, i);
+ }
+#else
+ ke4(cx->ks, 0); ke4(cx->ks, 1);
+ ke4(cx->ks, 2); ke4(cx->ks, 3);
+ ke4(cx->ks, 4); ke4(cx->ks, 5);
+ ke4(cx->ks, 6); ke4(cx->ks, 7);
+ ke4(cx->ks, 8); kel4(cx->ks, 9);
+#endif
+
+ /* cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] is zero for a 256 bit */
+    /* key and must be non-zero for 128 and 192 bit keys */
+ cx->ks[53] = cx->ks[45] = 0;
+ cx->ks[52] = 10;
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_192) || defined(AES_VAR)
+
+aes_rval aes_encrypt_key192(const void *in_key, aes_encrypt_ctx cx[1])
+{ aes_32t ss[6];
+
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+ cx->ks[4] = ss[4] = word_in(in_key, 4);
+ cx->ks[5] = ss[5] = word_in(in_key, 5);
+
+#if ENC_UNROLL == NONE
+ { aes_32t i;
+
+ for(i = 0; i < (13 * N_COLS - 1) / 6; ++i)
+ ke6(cx->ks, i);
+ }
+#else
+ ke6(cx->ks, 0); ke6(cx->ks, 1);
+ ke6(cx->ks, 2); ke6(cx->ks, 3);
+ ke6(cx->ks, 4); ke6(cx->ks, 5);
+ ke6(cx->ks, 6); kel6(cx->ks, 7);
+#endif
+
+ /* cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] is zero for a 256 bit */
+    /* key and must be non-zero for 128 and 192 bit keys */
+ cx->ks[53] = cx->ks[45];
+ cx->ks[52] = 12;
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_256) || defined(AES_VAR)
+
+aes_rval aes_encrypt_key256(const void *in_key, aes_encrypt_ctx cx[1])
+{ aes_32t ss[8];
+
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+ cx->ks[4] = ss[4] = word_in(in_key, 4);
+ cx->ks[5] = ss[5] = word_in(in_key, 5);
+ cx->ks[6] = ss[6] = word_in(in_key, 6);
+ cx->ks[7] = ss[7] = word_in(in_key, 7);
+
+#if ENC_UNROLL == NONE
+ { aes_32t i;
+
+ for(i = 0; i < (15 * N_COLS - 1) / 8; ++i)
+ ke8(cx->ks, i);
+ }
+#else
+ ke8(cx->ks, 0); ke8(cx->ks, 1);
+ ke8(cx->ks, 2); ke8(cx->ks, 3);
+ ke8(cx->ks, 4); ke8(cx->ks, 5);
+ kel8(cx->ks, 6);
+#endif
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_VAR)
+
+aes_rval aes_encrypt_key(const void *in_key, int key_len, aes_encrypt_ctx cx[1])
+{
+ switch(key_len)
+ {
+#ifdef AES_ERR_CHK
+ case 16: case 128: return aes_encrypt_key128(in_key, cx);
+ case 24: case 192: return aes_encrypt_key192(in_key, cx);
+ case 32: case 256: return aes_encrypt_key256(in_key, cx);
+ default: return aes_error;
+#else
+ case 16: case 128: aes_encrypt_key128(in_key, cx); return;
+ case 24: case 192: aes_encrypt_key192(in_key, cx); return;
+ case 32: case 256: aes_encrypt_key256(in_key, cx); return;
+#endif
+ }
+}
+
+#endif
+
+#endif
+
+#if defined(DECRYPTION_KEY_SCHEDULE)
+
+#if DEC_ROUND == NO_TABLES
+#define ff(x) (x)
+#else
+#define ff(x) inv_mcol(x)
+#ifdef dec_imvars
+#define d_vars dec_imvars
+#endif
+#endif
+
+#if 1
+#define kdf4(k,i) \
+{ ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; ss[1] = ss[1] ^ ss[3]; ss[2] = ss[2] ^ ss[3]; ss[3] = ss[3]; \
+ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; ss[i % 4] ^= ss[4]; \
+ ss[4] ^= k[4*(i)]; k[4*(i)+4] = ff(ss[4]); ss[4] ^= k[4*(i)+1]; k[4*(i)+5] = ff(ss[4]); \
+ ss[4] ^= k[4*(i)+2]; k[4*(i)+6] = ff(ss[4]); ss[4] ^= k[4*(i)+3]; k[4*(i)+7] = ff(ss[4]); \
+}
+#define kd4(k,i) \
+{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; ss[i % 4] ^= ss[4]; ss[4] = ff(ss[4]); \
+ k[4*(i)+4] = ss[4] ^= k[4*(i)]; k[4*(i)+5] = ss[4] ^= k[4*(i)+1]; \
+ k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \
+}
+#define kdl4(k,i) \
+{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; ss[i % 4] ^= ss[4]; \
+ k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; k[4*(i)+5] = ss[1] ^ ss[3]; \
+ k[4*(i)+6] = ss[0]; k[4*(i)+7] = ss[1]; \
+}
+#else
+#define kdf4(k,i) \
+{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[4*(i)+ 4] = ff(ss[0]); ss[1] ^= ss[0]; k[4*(i)+ 5] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[4*(i)+ 6] = ff(ss[2]); ss[3] ^= ss[2]; k[4*(i)+ 7] = ff(ss[3]); \
+}
+#define kd4(k,i) \
+{ ss[4] = ls_box(ss[3],3) ^ t_use(r,c)[i]; \
+ ss[0] ^= ss[4]; ss[4] = ff(ss[4]); k[4*(i)+ 4] = ss[4] ^= k[4*(i)]; \
+ ss[1] ^= ss[0]; k[4*(i)+ 5] = ss[4] ^= k[4*(i)+ 1]; \
+ ss[2] ^= ss[1]; k[4*(i)+ 6] = ss[4] ^= k[4*(i)+ 2]; \
+ ss[3] ^= ss[2]; k[4*(i)+ 7] = ss[4] ^= k[4*(i)+ 3]; \
+}
+#define kdl4(k,i) \
+{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[4*(i)+ 4] = ss[0]; ss[1] ^= ss[0]; k[4*(i)+ 5] = ss[1]; \
+ ss[2] ^= ss[1]; k[4*(i)+ 6] = ss[2]; ss[3] ^= ss[2]; k[4*(i)+ 7] = ss[3]; \
+}
+#endif
+
+#define kdf6(k,i) \
+{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[6*(i)+ 6] = ff(ss[0]); ss[1] ^= ss[0]; k[6*(i)+ 7] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[6*(i)+ 8] = ff(ss[2]); ss[3] ^= ss[2]; k[6*(i)+ 9] = ff(ss[3]); \
+ ss[4] ^= ss[3]; k[6*(i)+10] = ff(ss[4]); ss[5] ^= ss[4]; k[6*(i)+11] = ff(ss[5]); \
+}
+#define kd6(k,i) \
+{ ss[6] = ls_box(ss[5],3) ^ t_use(r,c)[i]; \
+ ss[0] ^= ss[6]; ss[6] = ff(ss[6]); k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \
+ ss[1] ^= ss[0]; k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \
+ ss[2] ^= ss[1]; k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \
+ ss[3] ^= ss[2]; k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \
+ ss[4] ^= ss[3]; k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \
+ ss[5] ^= ss[4]; k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \
+}
+#define kdl6(k,i) \
+{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[6*(i)+ 6] = ss[0]; ss[1] ^= ss[0]; k[6*(i)+ 7] = ss[1]; \
+ ss[2] ^= ss[1]; k[6*(i)+ 8] = ss[2]; ss[3] ^= ss[2]; k[6*(i)+ 9] = ss[3]; \
+}
+
+#define kdf8(k,i) \
+{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[8*(i)+ 8] = ff(ss[0]); ss[1] ^= ss[0]; k[8*(i)+ 9] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[8*(i)+10] = ff(ss[2]); ss[3] ^= ss[2]; k[8*(i)+11] = ff(ss[3]); \
+ ss[4] ^= ls_box(ss[3],0); k[8*(i)+12] = ff(ss[4]); ss[5] ^= ss[4]; k[8*(i)+13] = ff(ss[5]); \
+ ss[6] ^= ss[5]; k[8*(i)+14] = ff(ss[6]); ss[7] ^= ss[6]; k[8*(i)+15] = ff(ss[7]); \
+}
+#define kd8(k,i) \
+{ aes_32t g = ls_box(ss[7],3) ^ t_use(r,c)[i]; \
+ ss[0] ^= g; g = ff(g); k[8*(i)+ 8] = g ^= k[8*(i)]; \
+ ss[1] ^= ss[0]; k[8*(i)+ 9] = g ^= k[8*(i)+ 1]; \
+ ss[2] ^= ss[1]; k[8*(i)+10] = g ^= k[8*(i)+ 2]; \
+ ss[3] ^= ss[2]; k[8*(i)+11] = g ^= k[8*(i)+ 3]; \
+ g = ls_box(ss[3],0); \
+ ss[4] ^= g; g = ff(g); k[8*(i)+12] = g ^= k[8*(i)+ 4]; \
+ ss[5] ^= ss[4]; k[8*(i)+13] = g ^= k[8*(i)+ 5]; \
+ ss[6] ^= ss[5]; k[8*(i)+14] = g ^= k[8*(i)+ 6]; \
+ ss[7] ^= ss[6]; k[8*(i)+15] = g ^= k[8*(i)+ 7]; \
+}
+#define kdl8(k,i) \
+{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[8*(i)+ 8] = ss[0]; ss[1] ^= ss[0]; k[8*(i)+ 9] = ss[1]; \
+ ss[2] ^= ss[1]; k[8*(i)+10] = ss[2]; ss[3] ^= ss[2]; k[8*(i)+11] = ss[3]; \
+}
+
+#if defined(AES_128) || defined(AES_VAR)
+
+aes_rval aes_decrypt_key128(const void *in_key, aes_decrypt_ctx cx[1])
+{ aes_32t ss[5];
+#ifdef d_vars
+ d_vars;
+#endif
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+
+#if DEC_UNROLL == NONE
+ { aes_32t i;
+
+ for(i = 0; i < (11 * N_COLS - 1) / 4; ++i)
+ ke4(cx->ks, i);
+#if !(DEC_ROUND == NO_TABLES)
+ for(i = N_COLS; i < 10 * N_COLS; ++i)
+ cx->ks[i] = inv_mcol(cx->ks[i]);
+#endif
+ }
+#else
+ kdf4(cx->ks, 0); kd4(cx->ks, 1);
+ kd4(cx->ks, 2); kd4(cx->ks, 3);
+ kd4(cx->ks, 4); kd4(cx->ks, 5);
+ kd4(cx->ks, 6); kd4(cx->ks, 7);
+ kd4(cx->ks, 8); kdl4(cx->ks, 9);
+#endif
+
+ /* cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] is zero for a 256 bit */
+    /* key and must be non-zero for 128 and 192 bit keys */
+ cx->ks[53] = cx->ks[45] = 0;
+ cx->ks[52] = 10;
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_192) || defined(AES_VAR)
+
+aes_rval aes_decrypt_key192(const void *in_key, aes_decrypt_ctx cx[1])
+{ aes_32t ss[7];
+#ifdef d_vars
+ d_vars;
+#endif
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+
+#if DEC_UNROLL == NONE
+ cx->ks[4] = ss[4] = word_in(in_key, 4);
+ cx->ks[5] = ss[5] = word_in(in_key, 5);
+ { aes_32t i;
+
+ for(i = 0; i < (13 * N_COLS - 1) / 6; ++i)
+ ke6(cx->ks, i);
+#if !(DEC_ROUND == NO_TABLES)
+ for(i = N_COLS; i < 12 * N_COLS; ++i)
+ cx->ks[i] = inv_mcol(cx->ks[i]);
+#endif
+ }
+#else
+ cx->ks[4] = ff(ss[4] = word_in(in_key, 4));
+ cx->ks[5] = ff(ss[5] = word_in(in_key, 5));
+ kdf6(cx->ks, 0); kd6(cx->ks, 1);
+ kd6(cx->ks, 2); kd6(cx->ks, 3);
+ kd6(cx->ks, 4); kd6(cx->ks, 5);
+ kd6(cx->ks, 6); kdl6(cx->ks, 7);
+#endif
+
+ /* cx->ks[45] ^ cx->ks[52] ^ cx->ks[53] is zero for a 256 bit */
+    /* key and must be non-zero for 128 and 192 bit keys */
+ cx->ks[53] = cx->ks[45];
+ cx->ks[52] = 12;
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_256) || defined(AES_VAR)
+
+aes_rval aes_decrypt_key256(const void *in_key, aes_decrypt_ctx cx[1])
+{ aes_32t ss[8];
+#ifdef d_vars
+ d_vars;
+#endif
+ cx->ks[0] = ss[0] = word_in(in_key, 0);
+ cx->ks[1] = ss[1] = word_in(in_key, 1);
+ cx->ks[2] = ss[2] = word_in(in_key, 2);
+ cx->ks[3] = ss[3] = word_in(in_key, 3);
+
+#if DEC_UNROLL == NONE
+ cx->ks[4] = ss[4] = word_in(in_key, 4);
+ cx->ks[5] = ss[5] = word_in(in_key, 5);
+ cx->ks[6] = ss[6] = word_in(in_key, 6);
+ cx->ks[7] = ss[7] = word_in(in_key, 7);
+ { aes_32t i;
+
+ for(i = 0; i < (15 * N_COLS - 1) / 8; ++i)
+ ke8(cx->ks, i);
+#if !(DEC_ROUND == NO_TABLES)
+ for(i = N_COLS; i < 14 * N_COLS; ++i)
+ cx->ks[i] = inv_mcol(cx->ks[i]);
+#endif
+ }
+#else
+ cx->ks[4] = ff(ss[4] = word_in(in_key, 4));
+ cx->ks[5] = ff(ss[5] = word_in(in_key, 5));
+ cx->ks[6] = ff(ss[6] = word_in(in_key, 6));
+ cx->ks[7] = ff(ss[7] = word_in(in_key, 7));
+ kdf8(cx->ks, 0); kd8(cx->ks, 1);
+ kd8(cx->ks, 2); kd8(cx->ks, 3);
+ kd8(cx->ks, 4); kd8(cx->ks, 5);
+ kdl8(cx->ks, 6);
+#endif
+#ifdef AES_ERR_CHK
+ return aes_good;
+#endif
+}
+
+#endif
+
+#if defined(AES_VAR)
+
+aes_rval aes_decrypt_key(const void *in_key, int key_len, aes_decrypt_ctx cx[1])
+{
+ switch(key_len)
+ {
+#ifdef AES_ERR_CHK
+ case 16: case 128: return aes_decrypt_key128(in_key, cx);
+ case 24: case 192: return aes_decrypt_key192(in_key, cx);
+ case 32: case 256: return aes_decrypt_key256(in_key, cx);
+ default: return aes_error;
+#else
+ case 16: case 128: aes_decrypt_key128(in_key, cx); return;
+ case 24: case 192: aes_decrypt_key192(in_key, cx); return;
+ case 32: case 256: aes_decrypt_key256(in_key, cx); return;
+#endif
+ }
+}
+
+#endif
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
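
As the comment at the head of this file notes, the key length may be given either in bytes or in bits; with AES_VAR defined, the dispatch routines above accept both forms. A short caller sketch (the wrapper name set_any_key is hypothetical, and AES_ERR_CHK is assumed so that the return codes exist):

    #include "aes.h"

    /* Sketch: aes_encrypt_key() accepts 16/24/32 (bytes) or 128/192/256
       (bits) and dispatches to the fixed-length key schedule routines.   */
    static int set_any_key(const unsigned char *key, int key_len,
                           aes_encrypt_ctx ctx[1])
    {
        return aes_encrypt_key(key, key_len, ctx) == aes_good ? 0 : -1;
    }
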
diff --git a/scripts/conf-w32brg/cipher/aesopt.h b/scripts/conf-w32brg/cipher/aesopt.h
new file mode 100644
index 000000000..ba94f97b3
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aesopt.h
@@ -0,0 +1,1042 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman <[email protected]>, Worcester, UK.
+ All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 1/06/2003
+
+ My thanks go to Dag Arne Osvik for devising the schemes used here for key
+ length derivation from the form of the key schedule
+
+ This file contains the compilation options for AES (Rijndael) and code
+ that is common across encryption, key scheduling and table generation.
+
+ OPERATION
+
+ These source code files implement the AES algorithm Rijndael designed by
+ Joan Daemen and Vincent Rijmen. This version is designed for the standard
+ block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24
+ and 32 bytes).
+
+ This version is designed for flexibility and speed using operations on
+ 32-bit words rather than operations on bytes. It can be compiled with
+ either big or little endian internal byte order but is faster when the
+ native byte order for the processor is used.
+
+ THE CIPHER INTERFACE
+
+ The cipher interface is implemented as an array of bytes in which lower
+ AES bit sequence indexes map to higher numeric significance within bytes.
+
+ aes_08t (an unsigned 8-bit type)
+ aes_32t (an unsigned 32-bit type)
+ struct aes_encrypt_ctx (structure for the cipher encryption context)
+ struct aes_decrypt_ctx (structure for the cipher decryption context)
+      aes_rval                (the function return type)
+
+ C subroutine calls:
+
+ aes_rval aes_encrypt_key128(const void *in_key, aes_encrypt_ctx cx[1]);
+ aes_rval aes_encrypt_key192(const void *in_key, aes_encrypt_ctx cx[1]);
+ aes_rval aes_encrypt_key256(const void *in_key, aes_encrypt_ctx cx[1]);
+ aes_rval aes_encrypt(const void *in_blk,
+ void *out_blk, const aes_encrypt_ctx cx[1]);
+
+ aes_rval aes_decrypt_key128(const void *in_key, aes_decrypt_ctx cx[1]);
+ aes_rval aes_decrypt_key192(const void *in_key, aes_decrypt_ctx cx[1]);
+ aes_rval aes_decrypt_key256(const void *in_key, aes_decrypt_ctx cx[1]);
+ aes_rval aes_decrypt(const void *in_blk,
+ void *out_blk, const aes_decrypt_ctx cx[1]);
+
+    IMPORTANT NOTE: If you are using this C interface with dynamic tables, make
+    sure that you call gen_tabs() before AES is used so that the tables are
+    initialised.
+
+ C++ aes class subroutines:
+
+ Class AESencrypt for encryption
+
+      Constructors:
+ AESencrypt(void)
+ AESencrypt(const void *in_key) - 128 bit key
+ Members:
+ void key128(const void *in_key)
+ void key192(const void *in_key)
+ void key256(const void *in_key)
+ void encrypt(const void *in_blk, void *out_blk) const
+
+    Class AESdecrypt  for decryption
+      Constructors:
+ AESdecrypt(void)
+ AESdecrypt(const void *in_key) - 128 bit key
+ Members:
+ void key128(const void *in_key)
+ void key192(const void *in_key)
+ void key256(const void *in_key)
+ void decrypt(const void *in_blk, void *out_blk) const
+
+ COMPILATION
+
+ The files used to provide AES (Rijndael) are
+
+ a. aes.h for the definitions needed for use in C.
+ b. aescpp.h for the definitions needed for use in C++.
+ c. aesopt.h for setting compilation options (also includes common code).
+    d. aescrypt.c for encryption and decryption, or
+ e. aeskey.c for key scheduling.
+ f. aestab.c for table loading or generation.
+ g. aescrypt.asm for encryption and decryption using assembler code.
+ h. aescrypt.mmx.asm for encryption and decryption using MMX assembler.
+
+ To compile AES (Rijndael) for use in C code use aes.h and set the
+ defines here for the facilities you need (key lengths, encryption
+ and/or decryption). Do not define AES_DLL or AES_CPP. Set the options
+ for optimisations and table sizes here.
+
+    To compile AES (Rijndael) for use in C++ code use aescpp.h but do
+    not define AES_DLL.
+
+    To compile AES (Rijndael) in C as a Dynamic Link Library (DLL) use
+    aes.h and include the AES_DLL define.
+
+ CONFIGURATION OPTIONS (here and in aes.h)
+
+ a. set AES_DLL in aes.h if AES (Rijndael) is to be compiled as a DLL
+ b. You may need to set PLATFORM_BYTE_ORDER to define the byte order.
+ c. If you want the code to run in a specific internal byte order, then
+ INTERNAL_BYTE_ORDER must be set accordingly.
+    d. set other configuration options described below.
+*/
+
+#ifndef _AESOPT_H
+#define _AESOPT_H
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+/* START OF CONFIGURATION OPTIONS
+
+ USE OF DEFINES
+
+ Later in this section there are a number of defines that control the
+ operation of the code. In each section, the purpose of each define is
+ explained so that the relevant form can be included or excluded by
+ setting either 1's or 0's respectively on the branches of the related
+ #if clauses.
+*/
+
+/* DO NOT CHANGE THE FOLLOWING EIGHT DEFINES */
+
+#define NO_TABLES 0
+#define ONE_TABLE 1
+#define FOUR_TABLES 4
+#define NONE 0
+#define PARTIAL 1
+#define FULL 2
+#define AES_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
+#define AES_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
+
+/* 1. PLATFORM SPECIFIC INCLUDES */
+
+#if defined( __CRYPTLIB__ ) && !defined( INC_ALL ) && !defined( INC_CHILD )
+#include "crypt/aes.h"
+#else
+ #include "aes.h"
+#endif
+
+#if defined(__GNUC__) || defined(__GNU_LIBRARY__)
+# if defined( __FreeBSD__ ) || defined( __OpenBSD__ )
+# include <sys/endian.h>
+# elif defined( __APPLE__ )
+# if defined( __BIG_ENDIAN__ ) && !defined( BIG_ENDIAN )
+# define BIG_ENDIAN
+# elif defined( __LITTLE_ENDIAN__ ) && !defined( LITTLE_ENDIAN )
+# define LITTLE_ENDIAN
+# else
+# error Need to define CPU endianness for OS X
+# endif
+# else
+# include <endian.h>
+# include <byteswap.h>
+# endif /* *BSDs don't use standard Gnu setup */
+#elif defined(__CRYPTLIB__)
+# if defined( INC_ALL )
+# include "crypt.h"
+# elif defined( INC_CHILD )
+# include "../crypt.h"
+# else
+# include "crypt.h"
+# endif
+# if defined(DATA_LITTLEENDIAN)
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+# else
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+# endif
+#elif defined(_MSC_VER)
+# include <stdlib.h>
+#elif !defined(WIN32)
+# include <stdlib.h>
+# if defined (_ENDIAN_H)
+# include "endian.h"
+# else
+# include <sys/param.h>
+# endif
+#endif
+
+#if defined(bswap32)
+#define aes_sw32 bswap32
+#elif defined(bswap_32)
+#define aes_sw32 bswap_32
+#endif
+
+/* 2. BYTE ORDER IN 32-BIT WORDS
+
+ To obtain the highest speed on processors with 32-bit words, this code
+ needs to determine the order in which bytes are packed into such words.
+ The following block of code is an attempt to capture the most obvious
+   ways in which various environments define byte order. It may well fail,
+ in which case the definitions will need to be set by editing at the
+ points marked **** EDIT HERE IF NECESSARY **** below.
+*/
+#if !defined(PLATFORM_BYTE_ORDER)
+#if defined(LITTLE_ENDIAN) || defined(BIG_ENDIAN)
+# if defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
+# if defined(BYTE_ORDER)
+# if (BYTE_ORDER == LITTLE_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+# elif (BYTE_ORDER == BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+# endif
+# endif
+# elif defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+# elif !defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+# endif
+#elif defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)
+# if defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
+# if defined(_BYTE_ORDER)
+# if (_BYTE_ORDER == _LITTLE_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+# elif (_BYTE_ORDER == _BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+# endif
+# endif
+# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+# elif !defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+# endif
+#elif 0 /* **** EDIT HERE IF NECESSARY **** */
+#define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+#elif 0 /* **** EDIT HERE IF NECESSARY **** */
+#define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+#elif (('1234' >> 24) == '1')
+# define PLATFORM_BYTE_ORDER AES_LITTLE_ENDIAN
+#elif (('4321' >> 24) == '1')
+# define PLATFORM_BYTE_ORDER AES_BIG_ENDIAN
+#endif
+#endif
+
+#if !defined(PLATFORM_BYTE_ORDER)
+# error Please set undetermined byte order (see the **** EDIT HERE IF NECESSARY **** lines above).
+#endif
+
+/* 3. FUNCTIONS REQUIRED
+
+ This implementation provides subroutines for encryption, decryption
+ and for setting the three key lengths (separately) for encryption
+ and decryption. When the assembler code is not being used the following
+ definition blocks allow the selection of the routines that are to be
+ included in the compilation.
+*/
+#ifdef AES_ENCRYPT
+#define ENCRYPTION
+#define ENCRYPTION_KEY_SCHEDULE
+#endif
+
+#ifdef AES_DECRYPT
+#define DECRYPTION
+#define DECRYPTION_KEY_SCHEDULE
+#endif
+
+/* 4. ASSEMBLER SUPPORT
+
+ This define (which can be on the command line) enables the use of the
+ assembler code routines for encryption and decryption with the C code
+ only providing key scheduling
+*/
+#if 0
+#define AES_ASM
+#endif
+
+/* 5. BYTE ORDER WITHIN 32 BIT WORDS
+
+ The fundamental data processing units in Rijndael are 8-bit bytes. The
+ input, output and key input are all enumerated arrays of bytes in which
+ bytes are numbered starting at zero and increasing to one less than the
+ number of bytes in the array in question. This enumeration is only used
+ for naming bytes and does not imply any adjacency or order relationship
+ from one byte to another. When these inputs and outputs are considered
+ as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to
+ byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte.
+ In this implementation bits are numbered from 0 to 7 starting at the
+ numerically least significant end of each byte (bit n represents 2^n).
+
+ However, Rijndael can be implemented more efficiently using 32-bit
+ words by packing bytes into words so that bytes 4*n to 4*n+3 are placed
+ into word[n]. While in principle these bytes can be assembled into words
+ in any positions, this implementation only supports the two formats in
+ which bytes in adjacent positions within words also have adjacent byte
+ numbers. This order is called big-endian if the lowest numbered bytes
+ in words have the highest numeric significance and little-endian if the
+ opposite applies.
+
+ This code can work in either order irrespective of the order used by the
+ machine on which it runs. Normally the internal byte order will be set
+ to the order of the processor on which the code is to be run but this
+ define can be used to reverse this in special situations
+
+ NOTE: Assembler code versions rely on PLATFORM_BYTE_ORDER being set
+*/
+#if 1 || defined(AES_ASM)
+#define INTERNAL_BYTE_ORDER PLATFORM_BYTE_ORDER
+#elif 0
+#define INTERNAL_BYTE_ORDER AES_LITTLE_ENDIAN
+#elif 0
+#define INTERNAL_BYTE_ORDER AES_BIG_ENDIAN
+#else
+#error The internal byte order is not defined
+#endif
+
+/* 6. FAST INPUT/OUTPUT OPERATIONS.
+
+ On some machines it is possible to improve speed by transferring the
+ bytes in the input and output arrays to and from the internal 32-bit
+ variables by addressing these arrays as if they are arrays of 32-bit
+ words. On some machines this will always be possible but there may
+ be a large performance penalty if the byte arrays are not aligned on
+ the normal word boundaries. On other machines this technique will
+ lead to memory access errors when such 32-bit word accesses are not
+ properly aligned. The option SAFE_IO avoids such problems but will
+ often be slower on those machines that support misaligned access
+ (especially so if care is taken to align the input and output byte
+ arrays on 32-bit word boundaries). If SAFE_IO is not defined it is
+ assumed that access to byte arrays as if they are arrays of 32-bit
+ words will not cause problems when such accesses are misaligned.
+*/
+#if 1 && !defined(_MSC_VER)
+#define SAFE_IO
+#endif
+
+/* 7. LOOP UNROLLING
+
+   The code for encryption and decryption cycles through a number of rounds
+   that can be implemented either in a loop or by expanding the code into a
+   long sequence of instructions; the latter, known as loop unrolling,
+   produces a larger program but one that will often be much faster.
+ There are also potential speed advantages in expanding two iterations in
+ a loop with half the number of iterations, which is called partial loop
+ unrolling. The following options allow partial or full loop unrolling
+ to be set independently for encryption and decryption
+*/
+#if 1
+#define ENC_UNROLL FULL
+#elif 0
+#define ENC_UNROLL PARTIAL
+#else
+#define ENC_UNROLL NONE
+#endif
+
+#if 1
+#define DEC_UNROLL FULL
+#elif 0
+#define DEC_UNROLL PARTIAL
+#else
+#define DEC_UNROLL NONE
+#endif
+
+/* 8. FAST FINITE FIELD OPERATIONS
+
+ If this section is included, tables are used to provide faster finite
+ field arithmetic (this has no effect if FIXED_TABLES is defined).
+*/
+#if 1
+#define FF_TABLES
+#endif
+
+/* 9. INTERNAL STATE VARIABLE FORMAT
+
+ The internal state of Rijndael is stored in a number of local 32-bit
+   word variables which can be defined either as an array or as individual
+   named variables. Include this section if you want to store these local
+   variables in arrays. Otherwise individual local variables will be used.
+*/
+#if 1
+#define ARRAYS
+#endif
+
+/* In this implementation the columns of the state array are each held in
+ 32-bit words. The state array can be held in various ways: in an array
+ of words, in a number of individual word variables or in a number of
+ processor registers. The following define maps a variable name x and
+ a column number c to the way the state array variable is to be held.
+ The first define below maps the state into an array x[c] whereas the
+ second form maps the state into a number of individual variables x0,
+   x1, etc. Another form could map individual state columns to machine
+ register names.
+*/
+
+#if defined(ARRAYS)
+#define s(x,c) x[c]
+#else
+#define s(x,c) x##c
+#endif
+
+/* 10. FIXED OR DYNAMIC TABLES
+
+ When this section is included the tables used by the code are compiled
+ statically into the binary file. Otherwise the subroutine gen_tabs()
+ must be called to compute them before the code is first used.
+*/
+#if 1
+#define FIXED_TABLES
+#endif
+
+/* 11. TABLE ALIGNMENT
+
+   On some systems speed will be improved by aligning the AES large lookup
+ tables on particular boundaries. This define should be set to a power of
+ two giving the desired alignment. It can be left undefined if alignment
+   is not needed. This option is specific to the Microsoft VC++ compiler.
+*/
+
+#define TABLE_ALIGN 64
+
+/* 12. INTERNAL TABLE CONFIGURATION
+
+   This cipher proceeds by repeating a round function over a number of
+   cycles known as 'rounds'; the round function can optionally be speeded
+   up using tables. The basic tables are each 256 32-bit words, with either
+   one or four tables being required for each round function depending on
+   how much speed is required. The encryption and decryption round functions
+   are different, and the last encryption and decryption round functions are
+   different again, making four different round functions in all.
+
+ This means that:
+ 1. Normal encryption and decryption rounds can each use either 0, 1
+ or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
+ 2. The last encryption and decryption rounds can also use either 0, 1
+ or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
+
+ Include or exclude the appropriate definitions below to set the number
+ of tables used by this implementation.
+*/
+
+#if 1 /* set tables for the normal encryption round */
+#define ENC_ROUND FOUR_TABLES
+#elif 0
+#define ENC_ROUND ONE_TABLE
+#else
+#define ENC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the last encryption round */
+#define LAST_ENC_ROUND FOUR_TABLES
+#elif 0
+#define LAST_ENC_ROUND ONE_TABLE
+#else
+#define LAST_ENC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the normal decryption round */
+#define DEC_ROUND FOUR_TABLES
+#elif 0
+#define DEC_ROUND ONE_TABLE
+#else
+#define DEC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the last decryption round */
+#define LAST_DEC_ROUND FOUR_TABLES
+#elif 0
+#define LAST_DEC_ROUND ONE_TABLE
+#else
+#define LAST_DEC_ROUND NO_TABLES
+#endif
+
+/* The decryption key schedule can be speeded up with tables in the same
+ way that the round functions can. Include or exclude the following
+ defines to set this requirement.
+*/
+#if 1
+#define KEY_SCHED FOUR_TABLES
+#elif 0
+#define KEY_SCHED ONE_TABLE
+#else
+#define KEY_SCHED NO_TABLES
+#endif
+
+/* END OF CONFIGURATION OPTIONS */
+
+#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))
+
+/* Disable or report errors on some combinations of options */
+
+#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES
+#undef LAST_ENC_ROUND
+#define LAST_ENC_ROUND NO_TABLES
+#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES
+#undef LAST_ENC_ROUND
+#define LAST_ENC_ROUND ONE_TABLE
+#endif
+
+#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE
+#undef ENC_UNROLL
+#define ENC_UNROLL NONE
+#endif
+
+#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES
+#undef LAST_DEC_ROUND
+#define LAST_DEC_ROUND NO_TABLES
+#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES
+#undef LAST_DEC_ROUND
+#define LAST_DEC_ROUND ONE_TABLE
+#endif
+
+#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE
+#undef DEC_UNROLL
+#define DEC_UNROLL NONE
+#endif
+
+/* upr(x,n): rotates bytes within words by n positions, moving bytes to
+ higher index positions with wrap around into low positions
+ ups(x,n): moves bytes by n positions to higher index positions in
+ words but without wrap around
+ bval(x,n): extracts a byte from a word
+
+ NOTE: The definitions given here are intended only for use with
+ unsigned variables and with shift counts that are compile
+ time constants
+*/
+
+#if (INTERNAL_BYTE_ORDER == AES_LITTLE_ENDIAN)
+#define upr(x,n) (((aes_32t)(x) << (8 * (n))) | ((aes_32t)(x) >> (32 - 8 * (n))))
+#define ups(x,n) ((aes_32t) (x) << (8 * (n)))
+#define bval(x,n) ((aes_08t)((x) >> (8 * (n))))
+#define bytes2word(b0, b1, b2, b3) \
+ (((aes_32t)(b3) << 24) | ((aes_32t)(b2) << 16) | ((aes_32t)(b1) << 8) | (b0))
+#endif
+
+#if (INTERNAL_BYTE_ORDER == AES_BIG_ENDIAN)
+#define upr(x,n) (((aes_32t)(x) >> (8 * (n))) | ((aes_32t)(x) << (32 - 8 * (n))))
+#define ups(x,n) ((aes_32t) (x) >> (8 * (n)))
+#define bval(x,n) ((aes_08t)((x) >> (24 - 8 * (n))))
+#define bytes2word(b0, b1, b2, b3) \
+ (((aes_32t)(b0) << 24) | ((aes_32t)(b1) << 16) | ((aes_32t)(b2) << 8) | (b3))
+#endif
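+
+/* Worked examples for the little endian forms above:
+   bytes2word(0x01,0x02,0x03,0x04) gives the word 0x04030201, bval of
+   this word with index 0 recovers the byte 0x01, and upr of this word
+   by one position gives 0x03020104.
+*/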
+
+#if defined(SAFE_IO)
+
+#define word_in(x,c) bytes2word(((aes_08t*)(x)+4*c)[0], ((aes_08t*)(x)+4*c)[1], \
+ ((aes_08t*)(x)+4*c)[2], ((aes_08t*)(x)+4*c)[3])
+#define word_out(x,c,v) { ((aes_08t*)(x)+4*c)[0] = bval(v,0); ((aes_08t*)(x)+4*c)[1] = bval(v,1); \
+ ((aes_08t*)(x)+4*c)[2] = bval(v,2); ((aes_08t*)(x)+4*c)[3] = bval(v,3); }
+
+#elif (INTERNAL_BYTE_ORDER == PLATFORM_BYTE_ORDER)
+
+#define word_in(x,c) (*((aes_32t*)(x)+(c)))
+#define word_out(x,c,v) (*((aes_32t*)(x)+(c)) = (v))
+
+#else
+
+#ifndef aes_sw32
+#define brot(x,n) (((aes_32t)(x) << n) | ((aes_32t)(x) >> (32 - n)))
+#define aes_sw32(x) ((brot((x),8) & 0x00ff00ff) | (brot((x),24) & 0xff00ff00))
+#endif
+
+#define word_in(x,c) aes_sw32(*((aes_32t*)(x)+(c)))
+#define word_out(x,c,v) (*((aes_32t*)(x)+(c)) = aes_sw32(v))
+
+#endif
+
+/* the finite field modular polynomial and elements */
+
+#define WPOLY 0x011b
+#define BPOLY 0x1b
+
+/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */
+
+#define m1 0x80808080
+#define m2 0x7f7f7f7f
+#define gf_mulx(x) ((((x) & m2) << 1) ^ ((((x) & m1) >> 7) * BPOLY))
+
+/* The following defines provide alternative definitions of gf_mulx that might
+ give improved performance if a fast 32-bit multiply is not available. Note
+ that a temporary variable u needs to be defined where gf_mulx is used.
+
+#define gf_mulx(x) (u = (x) & m1, u |= (u >> 1), ((x) & m2) << 1) ^ ((u >> 3) | (u >> 6))
+#define m4 (0x01010101 * BPOLY)
+#define gf_mulx(x) (u = (x) & m1, ((x) & m2) << 1) ^ ((u - (u >> 7)) & m4)
+*/
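+
+/* For example, gf_mulx(0x00000080) gives 0x0000001b: the top bit of the
+   low byte is shifted out and the result is reduced with BPOLY, matching
+   {02}.{80} = {1b} in GF(2^8); all four bytes of a word are handled in
+   this way in parallel.
+*/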
+
+/* Work out which tables are needed for the different options */
+
+#ifdef AES_ASM
+#ifdef ENC_ROUND
+#undef ENC_ROUND
+#endif
+#define ENC_ROUND FOUR_TABLES
+#ifdef LAST_ENC_ROUND
+#undef LAST_ENC_ROUND
+#endif
+#define LAST_ENC_ROUND FOUR_TABLES
+#ifdef DEC_ROUND
+#undef DEC_ROUND
+#endif
+#define DEC_ROUND FOUR_TABLES
+#ifdef LAST_DEC_ROUND
+#undef LAST_DEC_ROUND
+#endif
+#define LAST_DEC_ROUND FOUR_TABLES
+#ifdef KEY_SCHED
+#undef KEY_SCHED
+#define KEY_SCHED FOUR_TABLES
+#endif
+#endif
+
+#if defined(ENCRYPTION) || defined(AES_ASM)
+#if ENC_ROUND == ONE_TABLE
+#define FT1_SET
+#elif ENC_ROUND == FOUR_TABLES
+#define FT4_SET
+#else
+#define SBX_SET
+#endif
+#if LAST_ENC_ROUND == ONE_TABLE
+#define FL1_SET
+#elif LAST_ENC_ROUND == FOUR_TABLES
+#define FL4_SET
+#elif !defined(SBX_SET)
+#define SBX_SET
+#endif
+#endif
+
+#if defined(DECRYPTION) || defined(AES_ASM)
+#if DEC_ROUND == ONE_TABLE
+#define IT1_SET
+#elif DEC_ROUND == FOUR_TABLES
+#define IT4_SET
+#else
+#define ISB_SET
+#endif
+#if LAST_DEC_ROUND == ONE_TABLE
+#define IL1_SET
+#elif LAST_DEC_ROUND == FOUR_TABLES
+#define IL4_SET
+#elif !defined(ISB_SET)
+#define ISB_SET
+#endif
+#endif
+
+#if defined(ENCRYPTION_KEY_SCHEDULE) || defined(DECRYPTION_KEY_SCHEDULE)
+#if KEY_SCHED == ONE_TABLE
+#define LS1_SET
+#define IM1_SET
+#elif KEY_SCHED == FOUR_TABLES
+#define LS4_SET
+#define IM4_SET
+#elif !defined(SBX_SET)
+#define SBX_SET
+#endif
+#endif
+
+/* If there are no global variables, the AES tables are placed in
+ a structure and a pointer is added to the AES context. If this
+ facility is used, the calling program has to ensure that this
+ pointer is managed appropriately. In particular, the value of
+ the t_dec(in,it) item in the table structure must be set to zero
+ in order to ensure that the tables are initialised. In practice
+ the three code sequences in aeskey.c that control the calls to
+ gen_tabs() and the gen_tabs() routine itself will require some
+ changes for a specific implementation. If global variables are
+ available it will generally be preferable to use them with the
+ precomputed FIXED_TABLES option that uses static global tables.
+
+ The following defines can be used to control the way the tables
+ are defined, initialised and used in embedded environments that
+ require special features for these purposes:
+
+ the 't_dec' construction is used to declare fixed table arrays
+ the 't_set' construction is used to set fixed table values
+ the 't_use' construction is used to access fixed table values
+
+ 256 byte tables:
+
+ t_xxx(s,box) => forward S box
+ t_xxx(i,box) => inverse S box
+
+ 256 32-bit word OR 4 x 256 32-bit word tables:
+
+ t_xxx(f,n) => forward normal round
+ t_xxx(f,l) => forward last round
+ t_xxx(i,n) => inverse normal round
+ t_xxx(i,l) => inverse last round
+ t_xxx(l,s) => key schedule table
+ t_xxx(i,m) => key schedule table
+
+ Other variables and tables:
+
+ t_xxx(r,c) => the rcon table
+*/
+
+#define t_dec(m,n) t_##m##n
+#define t_set(m,n) t_##m##n
+#define t_use(m,n) t_##m##n
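+
+/* For example, t_dec(f,n), t_set(f,n) and t_use(f,n) all expand to the
+   single name t_fn (the forward normal round table), and t_dec(s,box)
+   expands to t_sbox (the forward S box); alternative definitions of
+   these three constructions allow the tables to be reached instead, for
+   example, through the structure pointer described above.
+*/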
+
+#if defined(DO_TABLES) /* declare and instantiate tables */
+
+/* finite field arithmetic operations for table generation */
+
+#if defined(FIXED_TABLES) || !defined(FF_TABLES)
+
+#define f2(x) ((x<<1) ^ (((x>>7) & 1) * WPOLY))
+#define f4(x) ((x<<2) ^ (((x>>6) & 1) * WPOLY) ^ (((x>>6) & 2) * WPOLY))
+#define f8(x) ((x<<3) ^ (((x>>5) & 1) * WPOLY) ^ (((x>>5) & 2) * WPOLY) \
+ ^ (((x>>5) & 4) * WPOLY))
+#define f3(x) (f2(x) ^ x)
+#define f9(x) (f8(x) ^ x)
+#define fb(x) (f8(x) ^ f2(x) ^ x)
+#define fd(x) (f8(x) ^ f4(x) ^ x)
+#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
+
+#else
+
+#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
+#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
+#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
+#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
+#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
+#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
+#define fi(x) ((x) ? pow[ 255 - log[x]] : 0)
+
+#endif
+
+#if defined(FIXED_TABLES) /* declare and set values for static tables */
+
+#define sb_data(w) \
+ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
+ w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
+ w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
+ w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
+ w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
+ w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
+ w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
+ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
+ w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
+ w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
+ w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
+ w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
+ w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
+ w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
+ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
+ w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
+ w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
+ w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
+ w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
+ w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
+ w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
+ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
+ w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
+ w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
+ w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
+ w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
+ w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
+ w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
+ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
+ w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
+ w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
+ w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16)
+
+#define isb_data(w) \
+ w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\
+ w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\
+ w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\
+ w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\
+ w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\
+ w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\
+ w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\
+ w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\
+ w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\
+ w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\
+ w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\
+ w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\
+ w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\
+ w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\
+ w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\
+ w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\
+ w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\
+ w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\
+ w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\
+ w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\
+ w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\
+ w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\
+ w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\
+ w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\
+ w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\
+ w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\
+ w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\
+ w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\
+ w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\
+ w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\
+ w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\
+ w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d)
+
+#define mm_data(w) \
+ w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\
+ w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\
+ w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\
+ w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\
+ w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\
+ w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\
+ w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\
+ w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\
+ w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\
+ w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\
+ w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\
+ w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\
+ w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\
+ w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\
+ w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\
+ w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\
+ w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\
+ w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\
+ w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\
+ w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\
+ w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\
+ w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\
+ w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\
+ w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\
+ w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\
+ w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\
+ w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\
+ w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\
+ w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\
+ w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\
+ w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\
+ w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff)
+
+#define h0(x) (x)
+
+/* These defines are used to ensure tables are generated in the
+ right format depending on the internal byte order required
+*/
+
+#define w0(p) bytes2word(p, 0, 0, 0)
+#define w1(p) bytes2word(0, p, 0, 0)
+#define w2(p) bytes2word(0, 0, p, 0)
+#define w3(p) bytes2word(0, 0, 0, p)
+
+#define u0(p) bytes2word(f2(p), p, p, f3(p))
+#define u1(p) bytes2word(f3(p), f2(p), p, p)
+#define u2(p) bytes2word(p, f3(p), f2(p), p)
+#define u3(p) bytes2word(p, p, f3(p), f2(p))
+
+#define v0(p) bytes2word(fe(p), f9(p), fd(p), fb(p))
+#define v1(p) bytes2word(fb(p), fe(p), f9(p), fd(p))
+#define v2(p) bytes2word(fd(p), fb(p), fe(p), f9(p))
+#define v3(p) bytes2word(f9(p), fd(p), fb(p), fe(p))
+
+const aes_32t t_dec(r,c)[RC_LENGTH] =
+{
+ w0(0x01), w0(0x02), w0(0x04), w0(0x08), w0(0x10),
+ w0(0x20), w0(0x40), w0(0x80), w0(0x1b), w0(0x36)
+};
+
+#define d_1(t,n,b,v) const t n[256] = { b(v##0) }
+#define d_4(t,n,b,v) const t n[4][256] = { { b(v##0) }, { b(v##1) }, { b(v##2) }, { b(v##3) } }
+
+#else /* declare and instantiate tables for dynamic value generation in aestab.c */
+
+aes_32t t_dec(r,c)[RC_LENGTH];
+
+#define d_1(t,n,b,v) t n[256]
+#define d_4(t,n,b,v) t n[4][256]
+
+#endif
+
+#else /* declare tables without instantiation */
+
+#if defined(FIXED_TABLES)
+
+extern const aes_32t t_dec(r,c)[RC_LENGTH];
+
+#if defined(_MSC_VER) && defined(TABLE_ALIGN)
+#define d_1(t,n,b,v) extern __declspec(align(TABLE_ALIGN)) const t n[256]
+#define d_4(t,n,b,v) extern __declspec(align(TABLE_ALIGN)) const t n[4][256]
+#else
+#define d_1(t,n,b,v) extern const t n[256]
+#define d_4(t,n,b,v) extern const t n[4][256]
+#endif
+#else
+
+extern aes_32t t_dec(r,c)[RC_LENGTH];
+
+#if defined(_MSC_VER) && defined(TABLE_ALIGN)
+#define d_1(t,n,b,v) extern __declspec(align(TABLE_ALIGN)) t n[256]
+#define d_4(t,n,b,v) extern __declspec(align(TABLE_ALIGN)) t n[4][256]
+#else
+#define d_1(t,n,b,v) extern t n[256]
+#define d_4(t,n,b,v) extern t n[4][256]
+#endif
+#endif
+
+#endif
+
+#ifdef SBX_SET
+ d_1(aes_08t, t_dec(s,box), sb_data, h);
+#endif
+#ifdef ISB_SET
+ d_1(aes_08t, t_dec(i,box), isb_data, h);
+#endif
+
+#ifdef FT1_SET
+ d_1(aes_32t, t_dec(f,n), sb_data, u);
+#endif
+#ifdef FT4_SET
+ d_4(aes_32t, t_dec(f,n), sb_data, u);
+#endif
+
+#ifdef FL1_SET
+ d_1(aes_32t, t_dec(f,l), sb_data, w);
+#endif
+#ifdef FL4_SET
+ d_4(aes_32t, t_dec(f,l), sb_data, w);
+#endif
+
+#ifdef IT1_SET
+ d_1(aes_32t, t_dec(i,n), isb_data, v);
+#endif
+#ifdef IT4_SET
+ d_4(aes_32t, t_dec(i,n), isb_data, v);
+#endif
+
+#ifdef IL1_SET
+ d_1(aes_32t, t_dec(i,l), isb_data, w);
+#endif
+#ifdef IL4_SET
+ d_4(aes_32t, t_dec(i,l), isb_data, w);
+#endif
+
+#ifdef LS1_SET
+#ifdef FL1_SET
+#undef LS1_SET
+#else
+ d_1(aes_32t, t_dec(l,s), sb_data, w);
+#endif
+#endif
+
+#ifdef LS4_SET
+#ifdef FL4_SET
+#undef LS4_SET
+#else
+ d_4(aes_32t, t_dec(l,s), sb_data, w);
+#endif
+#endif
+
+#ifdef IM1_SET
+ d_1(aes_32t, t_dec(i,m), mm_data, v);
+#endif
+#ifdef IM4_SET
+ d_4(aes_32t, t_dec(i,m), mm_data, v);
+#endif
+
+/* generic definitions of Rijndael macros that use tables */
+
+#define no_table(x,box,vf,rf,c) bytes2word( \
+ box[bval(vf(x,0,c),rf(0,c))], \
+ box[bval(vf(x,1,c),rf(1,c))], \
+ box[bval(vf(x,2,c),rf(2,c))], \
+ box[bval(vf(x,3,c),rf(3,c))])
+
+#define one_table(x,op,tab,vf,rf,c) \
+ ( tab[bval(vf(x,0,c),rf(0,c))] \
+ ^ op(tab[bval(vf(x,1,c),rf(1,c))],1) \
+ ^ op(tab[bval(vf(x,2,c),rf(2,c))],2) \
+ ^ op(tab[bval(vf(x,3,c),rf(3,c))],3))
+
+#define four_tables(x,tab,vf,rf,c) \
+ ( tab[0][bval(vf(x,0,c),rf(0,c))] \
+ ^ tab[1][bval(vf(x,1,c),rf(1,c))] \
+ ^ tab[2][bval(vf(x,2,c),rf(2,c))] \
+ ^ tab[3][bval(vf(x,3,c),rf(3,c))])
+
+#define vf1(x,r,c) (x)
+#define rf1(r,c) (r)
+#define rf2(r,c) ((8+r-c)&3)
+
+/* perform forward and inverse column mix operation on four bytes in long word x in */
+/* parallel. NOTE: x must be a simple variable, NOT an expression in these macros. */
+
+#if defined(FM4_SET) /* not currently used */
+#define fwd_mcol(x) four_tables(x,t_use(f,m),vf1,rf1,0)
+#elif defined(FM1_SET) /* not currently used */
+#define fwd_mcol(x) one_table(x,upr,t_use(f,m),vf1,rf1,0)
+#else
+#define dec_fmvars aes_32t g2
+#define fwd_mcol(x) (g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ upr((x), 2) ^ upr((x), 1))
+#endif
+
+#if defined(IM4_SET)
+#define inv_mcol(x) four_tables(x,t_use(i,m),vf1,rf1,0)
+#elif defined(IM1_SET)
+#define inv_mcol(x) one_table(x,upr,t_use(i,m),vf1,rf1,0)
+#else
+#define dec_imvars aes_32t g2, g4, g9
+#define inv_mcol(x) (g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = (x) ^ gf_mulx(g4), g4 ^= g9, \
+ (x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ upr(g4, 2) ^ upr(g9, 1))
+#endif
+
+#if defined(FL4_SET)
+#define ls_box(x,c) four_tables(x,t_use(f,l),vf1,rf2,c)
+#elif defined(LS4_SET)
+#define ls_box(x,c) four_tables(x,t_use(l,s),vf1,rf2,c)
+#elif defined(FL1_SET)
+#define ls_box(x,c) one_table(x,upr,t_use(f,l),vf1,rf2,c)
+#elif defined(LS1_SET)
+#define ls_box(x,c) one_table(x,upr,t_use(l,s),vf1,rf2,c)
+#else
+#define ls_box(x,c) no_table(x,t_use(s,box),vf1,rf2,c)
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/scripts/conf-w32brg/cipher/aestab.c b/scripts/conf-w32brg/cipher/aestab.c
new file mode 100644
index 000000000..cf051f6c6
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/aestab.c
@@ -0,0 +1,232 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman <[email protected]>, Worcester, UK.
+ All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 1/06/2003
+
+*/
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#define DO_TABLES
+
+#include "aesopt.h"
+
+#if defined(FIXED_TABLES)
+
+/* a dummy version is provided so that a call to gen_tabs() is
+   harmless when fixed tables are in use */
+
+void gen_tabs(void)
+{
+}
+
+#else /* dynamic table generation */
+
+#if !defined(FF_TABLES)
+
+/* Generate the tables for the dynamic table option
+
+   It will generally be sensible to use tables to compute finite
+   field multiplies and inverses, but where memory is scarce this
+   code might sometimes be better. However, it only has an effect
+   during initialisation, so it is fairly unimportant in overall terms.
+*/
+
+/* return 2 ^ (n - 1) where n is the bit number of the highest bit
+ set in x with x in the range 1 < x < 0x00000200. This form is
+ used so that locals within fi can be bytes rather than words
+*/
+
+static aes_08t hibit(const aes_32t x)
+{ aes_08t r = (aes_08t)((x >> 1) | (x >> 2));
+
+ r |= (r >> 2);
+ r |= (r >> 4);
+ return (r + 1) >> 1;
+}
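+
+/* For example, hibit(0x50) returns 0x20 since the highest bit set in
+   0x50 is 0x40 */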
+
+/* return the inverse of the finite field element x */
+
+static aes_08t fi(const aes_08t x)
+{ aes_08t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;
+
+ if(x < 2) return x;
+
+ for(;;)
+ {
+ if(!n1) return v1;
+
+ while(n2 >= n1)
+ {
+ n2 /= n1; p2 ^= p1 * n2; v2 ^= v1 * n2; n2 = hibit(p2);
+ }
+
+ if(!n2) return v2;
+
+ while(n1 >= n2)
+ {
+ n1 /= n2; p1 ^= p2 * n1; v1 ^= v2 * n1; n1 = hibit(p1);
+ }
+ }
+}
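+
+/* For example, fi(0x02) returns 0x8d since {02}.{8d} = {01} modulo the
+   field polynomial 0x11b */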
+
+#endif
+
+/* The forward and inverse affine transformations used in the S-box */
+
+#define fwd_affine(x) \
+ (w = (aes_32t)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(aes_08t)(w^(w>>8)))
+
+#define inv_affine(x) \
+ (w = (aes_32t)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(aes_08t)(w^(w>>8)))
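+
+/* For example, fwd_affine(0x00) gives 0x63, which is why the S-box
+   entry for zero (whose field inverse is taken to be zero) is 0x63 */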
+
+static int init = 0;
+
+void gen_tabs(void)
+{ aes_32t i, w;
+
+#if defined(FF_TABLES)
+
+ aes_08t pow[512], log[256];
+
+ if(init) return;
+ /* log and power tables for GF(2^8) finite field with
+ WPOLY as modular polynomial - the simplest primitive
+ root is 0x03, used here to generate the tables
+ */
+
+ i = 0; w = 1;
+ do
+ {
+ pow[i] = (aes_08t)w;
+ pow[i + 255] = (aes_08t)w;
+ log[w] = (aes_08t)i++;
+ w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0);
+ }
+ while (w != 1);
+
+#else
+ if(init) return;
+#endif
+
+ for(i = 0, w = 1; i < RC_LENGTH; ++i)
+ {
+ t_set(r,c)[i] = bytes2word(w, 0, 0, 0);
+ w = f2(w);
+ }
+
+ for(i = 0; i < 256; ++i)
+ { aes_08t b;
+
+ b = fwd_affine(fi((aes_08t)i));
+ w = bytes2word(f2(b), b, b, f3(b));
+
+#ifdef SBX_SET
+ t_set(s,box)[i] = b;
+#endif
+
+#ifdef FT1_SET /* tables for a normal encryption round */
+ t_set(f,n)[i] = w;
+#endif
+#ifdef FT4_SET
+ t_set(f,n)[0][i] = w;
+ t_set(f,n)[1][i] = upr(w,1);
+ t_set(f,n)[2][i] = upr(w,2);
+ t_set(f,n)[3][i] = upr(w,3);
+#endif
+ w = bytes2word(b, 0, 0, 0);
+
+#ifdef FL1_SET /* tables for last encryption round (may also */
+ t_set(f,l)[i] = w; /* be used in the key schedule) */
+#endif
+#ifdef FL4_SET
+ t_set(f,l)[0][i] = w;
+ t_set(f,l)[1][i] = upr(w,1);
+ t_set(f,l)[2][i] = upr(w,2);
+ t_set(f,l)[3][i] = upr(w,3);
+#endif
+
+#ifdef LS1_SET /* table for key schedule if t_set(f,l) above is */
+ t_set(l,s)[i] = w; /* not of the required form */
+#endif
+#ifdef LS4_SET
+ t_set(l,s)[0][i] = w;
+ t_set(l,s)[1][i] = upr(w,1);
+ t_set(l,s)[2][i] = upr(w,2);
+ t_set(l,s)[3][i] = upr(w,3);
+#endif
+
+ b = fi(inv_affine((aes_08t)i));
+ w = bytes2word(fe(b), f9(b), fd(b), fb(b));
+
+#ifdef IM1_SET /* tables for the inverse mix column operation */
+ t_set(i,m)[b] = w;
+#endif
+#ifdef IM4_SET
+ t_set(i,m)[0][b] = w;
+ t_set(i,m)[1][b] = upr(w,1);
+ t_set(i,m)[2][b] = upr(w,2);
+ t_set(i,m)[3][b] = upr(w,3);
+#endif
+
+#ifdef ISB_SET
+ t_set(i,box)[i] = b;
+#endif
+#ifdef IT1_SET /* tables for a normal decryption round */
+ t_set(i,n)[i] = w;
+#endif
+#ifdef IT4_SET
+ t_set(i,n)[0][i] = w;
+ t_set(i,n)[1][i] = upr(w,1);
+ t_set(i,n)[2][i] = upr(w,2);
+ t_set(i,n)[3][i] = upr(w,3);
+#endif
+ w = bytes2word(b, 0, 0, 0);
+#ifdef IL1_SET /* tables for last decryption round */
+ t_set(i,l)[i] = w;
+#endif
+#ifdef IL4_SET
+ t_set(i,l)[0][i] = w;
+ t_set(i,l)[1][i] = upr(w,1);
+ t_set(i,l)[2][i] = upr(w,2);
+ t_set(i,l)[3][i] = upr(w,3);
+#endif
+ }
+ init = 1;
+}
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
diff --git a/scripts/conf-w32brg/cipher/rijndael2.c b/scripts/conf-w32brg/cipher/rijndael2.c
new file mode 100644
index 000000000..421f993b2
--- /dev/null
+++ b/scripts/conf-w32brg/cipher/rijndael2.c
@@ -0,0 +1,279 @@
+/* Rijndael (AES) for GnuPG
+ * Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *******************************************************************
+ A version of rijndael.c modified by Brian Gladman to use his AES code
+ */
+
+#include <stdlib.h>
+#include <string.h> /* for memcmp() */
+
+#include "util.h"
+
+#include "aes.h"
+
+typedef struct
+{ aes_encrypt_ctx ectx[1];
+ aes_decrypt_ctx dctx[1];
+ unsigned int klen;
+ unsigned int dkey;
+} RIJNDAEL_context;
+
+static const char *selftest(void);
+static int tested = 0;
+
+static void
+burn_stack (int bytes)
+{
+ char buf[64];
+
+ wipememory(buf,sizeof buf);
+ bytes -= sizeof buf;
+ if (bytes > 0)
+ burn_stack (bytes);
+}
+
+static int
+rijndael_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
+{ int rc;
+
+ if(!tested)
+ { const char *tr;
+ tested = 1;
+ tr = selftest();
+ if(tr)
+ {
+ fprintf(stderr, "%s\n", tr );
+ return G10ERR_SELFTEST_FAILED;
+ }
+ }
+
+ ctx->klen = keylen;
+ ctx->dkey = 0;
+ rc = 0;
+ if(keylen == 16 || keylen == 24 || keylen == 32)
+ aes_encrypt_key(key, keylen, ctx->ectx);
+ else
+ rc = 1;
+ burn_stack ( 100 + 16*sizeof(int));
+ return rc;
+}
+
+static void
+rijndael_encrypt (const RIJNDAEL_context *ctx, byte *b, const byte *a)
+{
+ aes_encrypt(a, b, ctx->ectx);
+ burn_stack (16 + 2*sizeof(int));
+}
+
+static void
+rijndael_decrypt (RIJNDAEL_context *ctx, byte *b, const byte *a)
+{
+ if(!(ctx->dkey))
+ {
+ aes_decrypt_key((byte*)ctx->ectx, ctx->klen, ctx->dctx);
+ ctx->dkey = 1;
+ }
+ aes_decrypt(a, b, ctx->dctx);
+ burn_stack (16+2*sizeof(int));
+}
+
+/* Test a single encryption and decryption with each key size. */
+
+static const char*
+selftest (void)
+{
+ RIJNDAEL_context ctx;
+ byte scratch[16];
+
+ /* The test vectors are from the AES supplied ones; more or less
+ * randomly taken from ecb_tbl.txt (I=42,81,14)
+ */
+ static const byte plaintext[16] = {
+ 0x01,0x4B,0xAF,0x22,0x78,0xA6,0x9D,0x33,
+ 0x1D,0x51,0x80,0x10,0x36,0x43,0xE9,0x9A
+ };
+ static const byte key[16] = {
+ 0xE8,0xE9,0xEA,0xEB,0xED,0xEE,0xEF,0xF0,
+ 0xF2,0xF3,0xF4,0xF5,0xF7,0xF8,0xF9,0xFA
+ };
+ static const byte ciphertext[16] = {
+ 0x67,0x43,0xC3,0xD1,0x51,0x9A,0xB4,0xF2,
+ 0xCD,0x9A,0x78,0xAB,0x09,0xA5,0x11,0xBD
+ };
+
+ static const byte plaintext_192[16] = {
+ 0x76,0x77,0x74,0x75,0xF1,0xF2,0xF3,0xF4,
+ 0xF8,0xF9,0xE6,0xE7,0x77,0x70,0x71,0x72
+ };
+ static const byte key_192[24] = {
+ 0x04,0x05,0x06,0x07,0x09,0x0A,0x0B,0x0C,
+ 0x0E,0x0F,0x10,0x11,0x13,0x14,0x15,0x16,
+ 0x18,0x19,0x1A,0x1B,0x1D,0x1E,0x1F,0x20
+ };
+ static const byte ciphertext_192[16] = {
+ 0x5D,0x1E,0xF2,0x0D,0xCE,0xD6,0xBC,0xBC,
+ 0x12,0x13,0x1A,0xC7,0xC5,0x47,0x88,0xAA
+ };
+
+ static const byte plaintext_256[16] = {
+ 0x06,0x9A,0x00,0x7F,0xC7,0x6A,0x45,0x9F,
+ 0x98,0xBA,0xF9,0x17,0xFE,0xDF,0x95,0x21
+ };
+ static const byte key_256[32] = {
+ 0x08,0x09,0x0A,0x0B,0x0D,0x0E,0x0F,0x10,
+ 0x12,0x13,0x14,0x15,0x17,0x18,0x19,0x1A,
+ 0x1C,0x1D,0x1E,0x1F,0x21,0x22,0x23,0x24,
+ 0x26,0x27,0x28,0x29,0x2B,0x2C,0x2D,0x2E
+ };
+ static const byte ciphertext_256[16] = {
+ 0x08,0x0E,0x95,0x17,0xEB,0x16,0x77,0x71,
+ 0x9A,0xCF,0x72,0x80,0x86,0x04,0x0A,0xE3
+ };
+
+ rijndael_setkey (&ctx, key, sizeof(key));
+ rijndael_encrypt (&ctx, scratch, plaintext);
+ if (memcmp (scratch, ciphertext, sizeof (ciphertext)))
+ return "Rijndael-128 test encryption failed.";
+ rijndael_decrypt (&ctx, scratch, scratch);
+ if (memcmp (scratch, plaintext, sizeof (plaintext)))
+ return "Rijndael-128 test decryption failed.";
+
+ rijndael_setkey (&ctx, key_192, sizeof(key_192));
+ rijndael_encrypt (&ctx, scratch, plaintext_192);
+ if (memcmp (scratch, ciphertext_192, sizeof (ciphertext_192)))
+ return "Rijndael-192 test encryption failed.";
+ rijndael_decrypt (&ctx, scratch, scratch);
+ if (memcmp (scratch, plaintext_192, sizeof (plaintext_192)))
+ return "Rijndael-192 test decryption failed.";
+
+ rijndael_setkey (&ctx, key_256, sizeof(key_256));
+ rijndael_encrypt (&ctx, scratch, plaintext_256);
+ if (memcmp (scratch, ciphertext_256, sizeof (ciphertext_256)))
+ return "Rijndael-256 test encryption failed.";
+ rijndael_decrypt (&ctx, scratch, scratch);
+ if (memcmp (scratch, plaintext_256, sizeof (plaintext_256)))
+ return "Rijndael-256 test decryption failed.";
+
+ return NULL;
+}
+
+#ifdef IS_MODULE
+static
+#endif
+ const char *
+ rijndael_get_info (int algo, size_t *keylen,
+ size_t *blocksize, size_t *contextsize,
+ int (**r_setkey) (void *c, byte *key, unsigned keylen),
+ void (**r_encrypt) (void *c, byte *outbuf, byte *inbuf),
+ void (**r_decrypt) (void *c, byte *outbuf, byte *inbuf)
+ )
+{
+ *keylen = algo==7? 128 : algo==8? 192 : 256;
+ *blocksize = 16;
+ *contextsize = sizeof (RIJNDAEL_context);
+
+ *(int (**)(RIJNDAEL_context*, const byte*, const unsigned))r_setkey
+ = rijndael_setkey;
+ *(void (**)(const RIJNDAEL_context*, byte*, const byte*))r_encrypt
+ = rijndael_encrypt;
+ *(void (**)(RIJNDAEL_context*, byte*, const byte*))r_decrypt
+ = rijndael_decrypt;
+
+ if( algo == 7 )
+ return "AES";
+ if (algo == 8)
+ return "AES192";
+ if (algo == 9)
+ return "AES256";
+ return NULL;
+}
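+
+/* Illustrative (hypothetical) use by a caller, not part of this file:
+
+   size_t klen, blklen, ctxlen;
+   int  (*setkey)(void *, byte *, unsigned);
+   void (*encrypt)(void *, byte *, byte *);
+   void (*decrypt)(void *, byte *, byte *);
+   const char *name = rijndael_get_info(9, &klen, &blklen, &ctxlen,
+                                        &setkey, &encrypt, &decrypt);
+
+   name is then "AES256", klen is 256 (bits) and blklen is 16 (bytes).
+*/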
+
+
+#ifdef IS_MODULE
+static
+const char * const gnupgext_version = "RIJNDAEL ($Revision$)";
+
+static struct {
+ int class;
+ int version;
+ int value;
+ void (*func)(void);
+} func_table[] = {
+ { 20, 1, 0, (void*)rijndael_get_info },
+ { 21, 1, 7 },
+ { 21, 1, 8 },
+ { 21, 1, 9 },
+};
+
+
+
+/****************
+ * Enumerate the names of the functions together with information about
+ * each function. Set sequence to an integer with an initial value of 0
+ * and do not change it.
+ * If what is 0, all kinds of functions are returned.
+ * Return values: class := class of function:
+ * 10 = message digest algorithm info function
+ * 11 = integer with available md algorithms
+ * 20 = cipher algorithm info function
+ * 21 = integer with available cipher algorithms
+ * 30 = public key algorithm info function
+ * 31 = integer with available pubkey algorithms
+ * version = interface version of the function/pointer
+ * (currently this is 1 for all functions)
+ */
+static
+void *
+gnupgext_enum_func ( int what, int *sequence, int *class, int *vers )
+{
+ void *ret;
+ int i = *sequence;
+
+ do {
+ if ( i >= DIM(func_table) || i < 0 ) {
+ return NULL;
+ }
+ *class = func_table[i].class;
+ *vers = func_table[i].version;
+ switch( *class ) {
+ case 11:
+ case 21:
+ case 31:
+ ret = &func_table[i].value;
+ break;
+ default:
+ ret = func_table[i].func;
+ break;
+ }
+ i++;
+ } while ( what && what != *class );
+
+ *sequence = i;
+ return ret;
+}
+#endif
+