libbtc
bitcoin C library
aes.c
Go to the documentation of this file.
1 /*
2  ---------------------------------------------------------------------------
3  Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
4 
5  LICENSE TERMS
6 
7  The redistribution and use of this software (with or without changes)
8  is allowed without the payment of fees or royalties provided that:
9 
10  1. source code distributions include the above copyright notice, this
11  list of conditions and the following disclaimer;
12 
13  2. binary distributions include the above copyright notice, this list
14  of conditions and the following disclaimer in their documentation;
15 
16  3. the name of the copyright holder is not used to endorse products
17  built using this software without specific written permission.
18 
19  DISCLAIMER
20 
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
25  * OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28 
29  ---------------------------------------------------------------------------
30  Issue 09/09/2006
31 
32  This is an AES implementation that uses only 8-bit byte operations on the
33  cipher state (there are options to use 32-bit types if available).
34 
35  The combination of mix columns and byte substitution used here is based on
36  that developed by Karl Malbrain. His contribution is acknowledged.
37  */
38 
39 
40 /* define if you have a fast memcpy function on your system */
41 #if 1
42 #define HAVE_MEMCPY
43 #include <string.h>
44 #if defined(_MSC_VER)
45 #include <intrin.h>
46 #pragma intrinsic(memcpy)
47 #endif
48 #endif
49 
50 #include <stdlib.h>
51 #include <stdint.h>
52 
53 
54 /* define if you have fast 32-bit types on your system */
55 #if 1
56 #define HAVE_UINT_32T
57 #endif
58 
59 /* define if you don't want any tables */
60 #if 1
61 #define USE_TABLES
62 #endif
63 
64 /* On Intel Core 2 duo VERSION_1 is faster */
65 
66 /* alternative versions (test for performance on your system) */
67 #if 1
68 #define VERSION_1
69 #endif
70 
71 #include "aes.h"
72 
73 #if defined(HAVE_UINT_32T)
74 //typedef unsigned long uint_32t;
75 typedef uint32_t uint_32t;
76 #endif
77 
78 /* functions for finite field multiplication in the AES Galois field */
79 
80 #define WPOLY 0x011b
81 #define BPOLY 0x1b
82 #define DPOLY 0x008d
83 
84 #define f1(x) (x)
85 #define f2(x) ((x << 1) ^ (((x >> 7) & 1) * WPOLY))
86 #define f4(x) ((x << 2) ^ (((x >> 6) & 1) * WPOLY) ^ (((x >> 6) & 2) * WPOLY))
87 #define f8(x) ((x << 3) ^ (((x >> 5) & 1) * WPOLY) ^ (((x >> 5) & 2) * WPOLY) ^ (((x >> 5) & 4) * WPOLY))
88 #define d2(x) (((x) >> 1) ^ ((x)&1 ? DPOLY : 0))
89 
90 #define f3(x) (f2(x) ^ x)
91 #define f9(x) (f8(x) ^ x)
92 #define fb(x) (f8(x) ^ f2(x) ^ x)
93 #define fd(x) (f8(x) ^ f4(x) ^ x)
94 #define fe(x) (f8(x) ^ f4(x) ^ f2(x))
95 
96 #if defined(USE_TABLES)
97 
98 #define sb_data(w) \
99  {/* S Box data values */ \
100  w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), 
w(0xc1), w(0x1d), w(0x9e), w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) \
101  }
102 
103 #define isb_data(w) \
104  {/* inverse S Box data values */ \
105  w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38), w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb), w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87), w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb), w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d), w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e), w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2), w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25), w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16), w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92), w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda), w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84), w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a), w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06), w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02), w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b), w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea), w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73), w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85), w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e), w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89), w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b), w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20), w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4), w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31), w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f), w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d), w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), 
w(0xc9), w(0x9c), w(0xef), w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0), w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61), w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26), w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) \
106  }
107 
108 #define mm_data(w) \
109  {/* basic data for forming finite field tables */ \
110  w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07), w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f), w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17), w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f), w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27), w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f), w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37), w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f), w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47), w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f), w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57), w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f), w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67), w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f), w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77), w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f), w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87), w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f), w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97), w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f), w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7), w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf), w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7), w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf), w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7), w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf), w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7), w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), 
w(0xdd), w(0xde), w(0xdf), w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7), w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef), w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7), w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) \
111  }
112 
113 static const uint_8t sbox[256] = sb_data(f1);
114 static const uint_8t isbox[256] = isb_data(f1);
115 
116 static const uint_8t gfm2_sbox[256] = sb_data(f2);
117 static const uint_8t gfm3_sbox[256] = sb_data(f3);
118 
119 static const uint_8t gfmul_9[256] = mm_data(f9);
120 static const uint_8t gfmul_b[256] = mm_data(fb);
121 static const uint_8t gfmul_d[256] = mm_data(fd);
122 static const uint_8t gfmul_e[256] = mm_data(fe);
123 
124 #define s_box(x) sbox[(x)]
125 #define is_box(x) isbox[(x)]
126 #define gfm2_sb(x) gfm2_sbox[(x)]
127 #define gfm3_sb(x) gfm3_sbox[(x)]
128 #define gfm_9(x) gfmul_9[(x)]
129 #define gfm_b(x) gfmul_b[(x)]
130 #define gfm_d(x) gfmul_d[(x)]
131 #define gfm_e(x) gfmul_e[(x)]
132 
133 #else
134 
135 /* this is the high bit of x right shifted by 1 */
136 /* position. Since the starting polynomial has */
137 /* 9 bits (0x11b), this right shift keeps the */
138 /* values of all top bits within a byte */
139 
140 static uint_8t hibit(const uint_8t x)
141 {
142  uint_8t r = (uint_8t)((x >> 1) | (x >> 2));
143 
144  r |= (r >> 2);
145  r |= (r >> 4);
146  return (r + 1) >> 1;
147 }
148 
149 /* return the inverse of the finite field element x */
150 
/* Return the multiplicative inverse of x in GF(2^8) modulo the AES
 * polynomial (BPOLY extended to 0x11b), using the extended Euclidean
 * algorithm on polynomials over GF(2).  0 and 1 map to themselves.
 * Only compiled when the precomputed tables are disabled.
 */
static uint_8t gf_inv(const uint_8t x)
{
    /* p1/p2: remainder polynomials; n1/n2: masks for their high bits
       (shifted right once by hibit); v1/v2: Bezout coefficients that
       accumulate the eventual inverse. */
    uint_8t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;

    if (x < 2) {
        return x;
    }

    for (;;) {
        if (n1)
            while (n2 >= n1) { /* divide polynomial p2 by p1 */
                n2 /= n1; /* shift smaller polynomial left */
                p2 ^= (p1 * n2) & 0xff; /* and remove from larger one */
                v2 ^= (v1 * n2); /* shift accumulated value and */
                n2 = hibit(p2); /* add into result */
            }
        else {
            return v1; /* p1 reached 1: v1 is the inverse */
        }

        if (n2) /* repeat with values swapped */
            while (n1 >= n2) {
                n1 /= n2;
                p1 ^= p2 * n1;
                v1 ^= v2 * n1;
                n1 = hibit(p1);
            }
        else {
            return v2; /* p2 reached 1: v2 is the inverse */
        }
    }
}
183 
184 /* The forward and inverse affine transformations used in the S-box */
/* Forward affine transformation of the AES S-box: XOR of the input
 * with four of its left-rotated copies, plus the constant 0x63.
 * Only compiled when the precomputed tables are disabled.
 */
uint_8t fwd_affine(const uint_8t x)
{
#if defined(HAVE_UINT_32T)
    uint_32t w = x;
    /* build the shifted copies in a wide register; w >> 8 folds the
       bits pushed past position 7 back in (emulating a rotate) */
    w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4);
    return 0x63 ^ ((w ^ (w >> 8)) & 0xff);
#else
    /* same rotates expressed directly on the byte */
    return 0x63 ^ x ^ (x << 1) ^ (x << 2) ^ (x << 3) ^ (x << 4) ^ (x >> 7) ^ (x >> 6) ^ (x >> 5) ^ (x >> 4);
#endif
}
195 
/* Inverse of fwd_affine: rotations by 1, 3 and 6 plus the constant
 * 0x05.  Only compiled when the precomputed tables are disabled.
 */
uint_8t inv_affine(const uint_8t x)
{
#if defined(HAVE_UINT_32T)
    uint_32t w = x;
    /* w >> 8 folds the overflowed bits back in (emulating a rotate) */
    w = (w << 1) ^ (w << 3) ^ (w << 6);
    return 0x05 ^ ((w ^ (w >> 8)) & 0xff);
#else
    return 0x05 ^ (x << 1) ^ (x << 3) ^ (x << 6) ^ (x >> 7) ^ (x >> 5) ^ (x >> 2);
#endif
}
206 
207 #define s_box(x) fwd_affine(gf_inv(x))
208 #define is_box(x) gf_inv(inv_affine(x))
209 #define gfm2_sb(x) f2(s_box(x))
210 #define gfm3_sb(x) f3(s_box(x))
211 #define gfm_9(x) f9(x)
212 #define gfm_b(x) fb(x)
213 #define gfm_d(x) fd(x)
214 #define gfm_e(x) fe(x)
215 
216 #endif
217 
218 #if defined(HAVE_MEMCPY)
219 #define block_copy_nn(d, s, l) memcpy(d, s, l)
220 #define block_copy(d, s) memcpy(d, s, N_BLOCK)
221 #else
222 #define block_copy_nn(d, s, l) copy_block_nn(d, s, l)
223 #define block_copy(d, s) copy_block(d, s)
224 #endif
225 
226 
227 #if defined(HAVE_MEMCPY)
228 #else
229 static void copy_block(void* d, const void* s)
230 {
231 #if defined(HAVE_UINT_32T)
232  ((uint_32t*)d)[0] = ((uint_32t*)s)[0];
233  ((uint_32t*)d)[1] = ((uint_32t*)s)[1];
234  ((uint_32t*)d)[2] = ((uint_32t*)s)[2];
235  ((uint_32t*)d)[3] = ((uint_32t*)s)[3];
236 #else
237  ((uint_8t*)d)[0] = ((uint_8t*)s)[0];
238  ((uint_8t*)d)[1] = ((uint_8t*)s)[1];
239  ((uint_8t*)d)[2] = ((uint_8t*)s)[2];
240  ((uint_8t*)d)[3] = ((uint_8t*)s)[3];
241  ((uint_8t*)d)[4] = ((uint_8t*)s)[4];
242  ((uint_8t*)d)[5] = ((uint_8t*)s)[5];
243  ((uint_8t*)d)[6] = ((uint_8t*)s)[6];
244  ((uint_8t*)d)[7] = ((uint_8t*)s)[7];
245  ((uint_8t*)d)[8] = ((uint_8t*)s)[8];
246  ((uint_8t*)d)[9] = ((uint_8t*)s)[9];
247  ((uint_8t*)d)[10] = ((uint_8t*)s)[10];
248  ((uint_8t*)d)[11] = ((uint_8t*)s)[11];
249  ((uint_8t*)d)[12] = ((uint_8t*)s)[12];
250  ((uint_8t*)d)[13] = ((uint_8t*)s)[13];
251  ((uint_8t*)d)[14] = ((uint_8t*)s)[14];
252  ((uint_8t*)d)[15] = ((uint_8t*)s)[15];
253 #endif
254 }
255 
256 static void copy_block_nn(void* d, const void* s, uint_8t nn)
257 {
258  while (nn--) {
259  *((uint_8t*)d)++ = *((uint_8t*)s)++;
260  }
261 }
262 #endif
263 
264 
265 static void xor_block(void* d, const void* s)
266 {
267 #if defined(HAVE_UINT_32T)
268  ((uint_32t*)d)[0] ^= ((const uint_32t*)s)[0];
269  ((uint_32t*)d)[1] ^= ((const uint_32t*)s)[1];
270  ((uint_32t*)d)[2] ^= ((const uint_32t*)s)[2];
271  ((uint_32t*)d)[3] ^= ((const uint_32t*)s)[3];
272 #else
273  ((uint_8t*)d)[0] ^= ((uint_8t*)s)[0];
274  ((uint_8t*)d)[1] ^= ((uint_8t*)s)[1];
275  ((uint_8t*)d)[2] ^= ((uint_8t*)s)[2];
276  ((uint_8t*)d)[3] ^= ((uint_8t*)s)[3];
277  ((uint_8t*)d)[4] ^= ((uint_8t*)s)[4];
278  ((uint_8t*)d)[5] ^= ((uint_8t*)s)[5];
279  ((uint_8t*)d)[6] ^= ((uint_8t*)s)[6];
280  ((uint_8t*)d)[7] ^= ((uint_8t*)s)[7];
281  ((uint_8t*)d)[8] ^= ((uint_8t*)s)[8];
282  ((uint_8t*)d)[9] ^= ((uint_8t*)s)[9];
283  ((uint_8t*)d)[10] ^= ((uint_8t*)s)[10];
284  ((uint_8t*)d)[11] ^= ((uint_8t*)s)[11];
285  ((uint_8t*)d)[12] ^= ((uint_8t*)s)[12];
286  ((uint_8t*)d)[13] ^= ((uint_8t*)s)[13];
287  ((uint_8t*)d)[14] ^= ((uint_8t*)s)[14];
288  ((uint_8t*)d)[15] ^= ((uint_8t*)s)[15];
289 #endif
290 }
291 
292 static void copy_and_key(void* d, const void* s, const void* k)
293 {
294 #if defined(HAVE_UINT_32T)
295  ((uint_32t*)d)[0] = ((const uint_32t*)s)[0] ^ ((const uint_32t*)k)[0];
296  ((uint_32t*)d)[1] = ((const uint_32t*)s)[1] ^ ((const uint_32t*)k)[1];
297  ((uint_32t*)d)[2] = ((const uint_32t*)s)[2] ^ ((const uint_32t*)k)[2];
298  ((uint_32t*)d)[3] = ((const uint_32t*)s)[3] ^ ((const uint_32t*)k)[3];
299 #elif 1
300  ((uint_8t*)d)[0] = ((uint_8t*)s)[0] ^ ((uint_8t*)k)[0];
301  ((uint_8t*)d)[1] = ((uint_8t*)s)[1] ^ ((uint_8t*)k)[1];
302  ((uint_8t*)d)[2] = ((uint_8t*)s)[2] ^ ((uint_8t*)k)[2];
303  ((uint_8t*)d)[3] = ((uint_8t*)s)[3] ^ ((uint_8t*)k)[3];
304  ((uint_8t*)d)[4] = ((uint_8t*)s)[4] ^ ((uint_8t*)k)[4];
305  ((uint_8t*)d)[5] = ((uint_8t*)s)[5] ^ ((uint_8t*)k)[5];
306  ((uint_8t*)d)[6] = ((uint_8t*)s)[6] ^ ((uint_8t*)k)[6];
307  ((uint_8t*)d)[7] = ((uint_8t*)s)[7] ^ ((uint_8t*)k)[7];
308  ((uint_8t*)d)[8] = ((uint_8t*)s)[8] ^ ((uint_8t*)k)[8];
309  ((uint_8t*)d)[9] = ((uint_8t*)s)[9] ^ ((uint_8t*)k)[9];
310  ((uint_8t*)d)[10] = ((uint_8t*)s)[10] ^ ((uint_8t*)k)[10];
311  ((uint_8t*)d)[11] = ((uint_8t*)s)[11] ^ ((uint_8t*)k)[11];
312  ((uint_8t*)d)[12] = ((uint_8t*)s)[12] ^ ((uint_8t*)k)[12];
313  ((uint_8t*)d)[13] = ((uint_8t*)s)[13] ^ ((uint_8t*)k)[13];
314  ((uint_8t*)d)[14] = ((uint_8t*)s)[14] ^ ((uint_8t*)k)[14];
315  ((uint_8t*)d)[15] = ((uint_8t*)s)[15] ^ ((uint_8t*)k)[15];
316 #else
317  block_copy(d, s);
318  xor_block(d, k);
319 #endif
320 }
321 
/* AES AddRoundKey: XOR round key k into state d, in place. */
static void add_round_key(uint_8t d[N_BLOCK], const uint_8t k[N_BLOCK])
{
    xor_block(d, k);
}
326 
328 {
329  uint_8t tt;
330 
331  st[0] = s_box(st[0]);
332  st[4] = s_box(st[4]);
333  st[8] = s_box(st[8]);
334  st[12] = s_box(st[12]);
335 
336  tt = st[1];
337  st[1] = s_box(st[5]);
338  st[5] = s_box(st[9]);
339  st[9] = s_box(st[13]);
340  st[13] = s_box(tt);
341 
342  tt = st[2];
343  st[2] = s_box(st[10]);
344  st[10] = s_box(tt);
345  tt = st[6];
346  st[6] = s_box(st[14]);
347  st[14] = s_box(tt);
348 
349  tt = st[15];
350  st[15] = s_box(st[11]);
351  st[11] = s_box(st[7]);
352  st[7] = s_box(st[3]);
353  st[3] = s_box(tt);
354 }
355 
357 {
358  uint_8t tt;
359 
360  st[0] = is_box(st[0]);
361  st[4] = is_box(st[4]);
362  st[8] = is_box(st[8]);
363  st[12] = is_box(st[12]);
364 
365  tt = st[13];
366  st[13] = is_box(st[9]);
367  st[9] = is_box(st[5]);
368  st[5] = is_box(st[1]);
369  st[1] = is_box(tt);
370 
371  tt = st[2];
372  st[2] = is_box(st[10]);
373  st[10] = is_box(tt);
374  tt = st[6];
375  st[6] = is_box(st[14]);
376  st[14] = is_box(tt);
377 
378  tt = st[3];
379  st[3] = is_box(st[7]);
380  st[7] = is_box(st[11]);
381  st[11] = is_box(st[15]);
382  st[15] = is_box(tt);
383 }
384 
385 #if defined(VERSION_1)
387 {
388  uint_8t st[N_BLOCK];
389  block_copy(st, dt);
390 #else
391 static void mix_sub_columns(uint_8t dt[N_BLOCK], uint_8t st[N_BLOCK])
392 {
393 #endif
394  dt[0] = gfm2_sb(st[0]) ^ gfm3_sb(st[5]) ^ s_box(st[10]) ^ s_box(st[15]);
395  dt[1] = s_box(st[0]) ^ gfm2_sb(st[5]) ^ gfm3_sb(st[10]) ^ s_box(st[15]);
396  dt[2] = s_box(st[0]) ^ s_box(st[5]) ^ gfm2_sb(st[10]) ^ gfm3_sb(st[15]);
397  dt[3] = gfm3_sb(st[0]) ^ s_box(st[5]) ^ s_box(st[10]) ^ gfm2_sb(st[15]);
398 
399  dt[4] = gfm2_sb(st[4]) ^ gfm3_sb(st[9]) ^ s_box(st[14]) ^ s_box(st[3]);
400  dt[5] = s_box(st[4]) ^ gfm2_sb(st[9]) ^ gfm3_sb(st[14]) ^ s_box(st[3]);
401  dt[6] = s_box(st[4]) ^ s_box(st[9]) ^ gfm2_sb(st[14]) ^ gfm3_sb(st[3]);
402  dt[7] = gfm3_sb(st[4]) ^ s_box(st[9]) ^ s_box(st[14]) ^ gfm2_sb(st[3]);
403 
404  dt[8] = gfm2_sb(st[8]) ^ gfm3_sb(st[13]) ^ s_box(st[2]) ^ s_box(st[7]);
405  dt[9] = s_box(st[8]) ^ gfm2_sb(st[13]) ^ gfm3_sb(st[2]) ^ s_box(st[7]);
406  dt[10] = s_box(st[8]) ^ s_box(st[13]) ^ gfm2_sb(st[2]) ^ gfm3_sb(st[7]);
407  dt[11] = gfm3_sb(st[8]) ^ s_box(st[13]) ^ s_box(st[2]) ^ gfm2_sb(st[7]);
408 
409  dt[12] = gfm2_sb(st[12]) ^ gfm3_sb(st[1]) ^ s_box(st[6]) ^ s_box(st[11]);
410  dt[13] = s_box(st[12]) ^ gfm2_sb(st[1]) ^ gfm3_sb(st[6]) ^ s_box(st[11]);
411  dt[14] = s_box(st[12]) ^ s_box(st[1]) ^ gfm2_sb(st[6]) ^ gfm3_sb(st[11]);
412  dt[15] = gfm3_sb(st[12]) ^ s_box(st[1]) ^ s_box(st[6]) ^ gfm2_sb(st[11]);
413 }
414 
#if defined(VERSION_1)
/* Combined InvMixColumns + InvSubBytes + InvShiftRows (one full
 * decryption round body, without AddRoundKey).  VERSION_1 updates dt
 * in place via a local snapshot; the alternative form reads st and
 * writes dt.  The scattered output indices perform InvShiftRows.
 */
static void inv_mix_sub_columns(uint_8t dt[N_BLOCK])
{
    uint_8t st[N_BLOCK];
    block_copy(st, dt);
#else
static void inv_mix_sub_columns(uint_8t dt[N_BLOCK], uint_8t st[N_BLOCK])
{
#endif
    /* each input column goes through the {0e,0b,0d,09} inverse
       MixColumns matrix, then the inverse S-box */
    dt[0] = is_box(gfm_e(st[0]) ^ gfm_b(st[1]) ^ gfm_d(st[2]) ^ gfm_9(st[3]));
    dt[5] = is_box(gfm_9(st[0]) ^ gfm_e(st[1]) ^ gfm_b(st[2]) ^ gfm_d(st[3]));
    dt[10] = is_box(gfm_d(st[0]) ^ gfm_9(st[1]) ^ gfm_e(st[2]) ^ gfm_b(st[3]));
    dt[15] = is_box(gfm_b(st[0]) ^ gfm_d(st[1]) ^ gfm_9(st[2]) ^ gfm_e(st[3]));

    dt[4] = is_box(gfm_e(st[4]) ^ gfm_b(st[5]) ^ gfm_d(st[6]) ^ gfm_9(st[7]));
    dt[9] = is_box(gfm_9(st[4]) ^ gfm_e(st[5]) ^ gfm_b(st[6]) ^ gfm_d(st[7]));
    dt[14] = is_box(gfm_d(st[4]) ^ gfm_9(st[5]) ^ gfm_e(st[6]) ^ gfm_b(st[7]));
    dt[3] = is_box(gfm_b(st[4]) ^ gfm_d(st[5]) ^ gfm_9(st[6]) ^ gfm_e(st[7]));

    dt[8] = is_box(gfm_e(st[8]) ^ gfm_b(st[9]) ^ gfm_d(st[10]) ^ gfm_9(st[11]));
    dt[13] = is_box(gfm_9(st[8]) ^ gfm_e(st[9]) ^ gfm_b(st[10]) ^ gfm_d(st[11]));
    dt[2] = is_box(gfm_d(st[8]) ^ gfm_9(st[9]) ^ gfm_e(st[10]) ^ gfm_b(st[11]));
    dt[7] = is_box(gfm_b(st[8]) ^ gfm_d(st[9]) ^ gfm_9(st[10]) ^ gfm_e(st[11]));

    dt[12] = is_box(gfm_e(st[12]) ^ gfm_b(st[13]) ^ gfm_d(st[14]) ^ gfm_9(st[15]));
    dt[1] = is_box(gfm_9(st[12]) ^ gfm_e(st[13]) ^ gfm_b(st[14]) ^ gfm_d(st[15]));
    dt[6] = is_box(gfm_d(st[12]) ^ gfm_9(st[13]) ^ gfm_e(st[14]) ^ gfm_b(st[15]));
    dt[11] = is_box(gfm_b(st[12]) ^ gfm_d(st[13]) ^ gfm_9(st[14]) ^ gfm_e(st[15]));
}
444 
445 #if defined(AES_ENC_PREKEYED) || defined(AES_DEC_PREKEYED)
446 
447 /* Set the cipher key for the pre-keyed version */
448 
/* Expand a user key into the round-key schedule ctx->ksch and set
 * ctx->rnd to the round count (10/12/14).  keylen may be given in
 * bytes (16/24/32) or bits (128/192); any other value clears the
 * context and returns -1, otherwise returns 0.
 * NOTE(review): the 256 bit-length alias is commented out in this
 * version, so a 256-bit key must be passed as keylen == 32 — confirm
 * this is intentional against callers.
 */
return_type aes_set_key(const unsigned char key[], length_type keylen, aes_context ctx[1])
{
    uint_8t cc, rc, hi;

    /* normalize keylen to bytes */
    switch (keylen) {
    case 16:
    case 128:
        keylen = 16;
        break;
    case 24:
    case 192:
        keylen = 24;
        break;
    case 32:
    //case 256:
        keylen = 32;
        break;
    default:
        ctx->rnd = 0; /* rnd == 0 marks the context invalid */
        return -1;
    }
    /* the first keylen bytes of the schedule are the raw key */
    block_copy_nn(ctx->ksch, key, keylen);
    hi = (keylen + 28) << 2;  /* total schedule size in bytes */
    ctx->rnd = (hi >> 4) - 1; /* rounds = schedule blocks - 1 */
    for (cc = keylen, rc = 1; cc < hi; cc += 4) {
        uint_8t tt, t0, t1, t2, t3;

        /* previous 4-byte word of the schedule */
        t0 = ctx->ksch[cc - 4];
        t1 = ctx->ksch[cc - 3];
        t2 = ctx->ksch[cc - 2];
        t3 = ctx->ksch[cc - 1];
        if (cc % keylen == 0) {
            /* start of a key-length group: RotWord + SubWord + Rcon */
            tt = t0;
            t0 = s_box(t1) ^ rc;
            t1 = s_box(t2);
            t2 = s_box(t3);
            t3 = s_box(tt);
            rc = f2(rc); /* next round constant: xtime in GF(2^8) */
        } else if (keylen > 24 && cc % keylen == 16) {
            /* extra SubWord step, 256-bit keys only */
            t0 = s_box(t0);
            t1 = s_box(t1);
            t2 = s_box(t2);
            t3 = s_box(t3);
        }
        /* new word = word one key-length back XOR transformed word */
        tt = cc - keylen;
        ctx->ksch[cc + 0] = ctx->ksch[tt + 0] ^ t0;
        ctx->ksch[cc + 1] = ctx->ksch[tt + 1] ^ t1;
        ctx->ksch[cc + 2] = ctx->ksch[tt + 2] ^ t2;
        ctx->ksch[cc + 3] = ctx->ksch[tt + 3] ^ t3;
    }
    return 0;
}
501 
502 #endif
503 
504 #if defined(AES_ENC_PREKEYED)
505 
506 /* Encrypt a single block of 16 bytes */
507 
/* Encrypt one 16-byte block using the pre-expanded key schedule in
 * ctx.  Returns 0 on success, nonzero if aes_set_key was never run
 * (ctx->rnd == 0).  in and out may alias.
 */
return_type aes_encrypt(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1])
{
    if (ctx->rnd) {
        uint_8t s1[N_BLOCK], r;
        /* initial AddRoundKey with round key 0 */
        copy_and_key(s1, in, ctx->ksch);

        /* inner rounds; the loop body is chosen by the preprocessor
           (in-place vs two-buffer variant), same result either way */
        for (r = 1; r < ctx->rnd; ++r)
#if defined(VERSION_1)
        {
            mix_sub_columns(s1);
            add_round_key(s1, ctx->ksch + r * N_BLOCK);
        }
#else
        {
            uint_8t s2[N_BLOCK];
            mix_sub_columns(s2, s1);
            copy_and_key(s1, s2, ctx->ksch + r * N_BLOCK);
        }
#endif
        /* final round omits MixColumns */
        shift_sub_rows(s1);
        copy_and_key(out, s1, ctx->ksch + r * N_BLOCK);
    } else {
        return -1;
    }
    return 0;
}
534 
535 /* CBC encrypt a number of blocks (input and return an IV) */
536 
537 return_type aes_cbc_encrypt(const unsigned char* in, unsigned char* out, int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1])
538 {
539  while (n_block--) {
540  xor_block(iv, in);
541  if (aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) {
542  return EXIT_FAILURE;
543  }
544  memcpy(out, iv, N_BLOCK);
545  in += N_BLOCK;
546  out += N_BLOCK;
547  }
548  return EXIT_SUCCESS;
549 }
550 
551 #endif
552 
553 #if defined(AES_DEC_PREKEYED)
554 
555 /* Decrypt a single block of 16 bytes */
556 
557 return_type aes_decrypt(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1])
558 {
559  if (ctx->rnd) {
560  uint_8t s1[N_BLOCK], r;
561  copy_and_key(s1, in, ctx->ksch + ctx->rnd * N_BLOCK);
562  inv_shift_sub_rows(s1);
563 
564  for (r = ctx->rnd; --r;)
565 #if defined(VERSION_1)
566  {
567  add_round_key(s1, ctx->ksch + r * N_BLOCK);
569  }
570 #else
571  {
572  uint_8t s2[N_BLOCK];
573  copy_and_key(s2, s1, ctx->ksch + r * N_BLOCK);
574  inv_mix_sub_columns(s1, s2);
575  }
576 #endif
577  copy_and_key(out, s1, ctx->ksch);
578  } else {
579  return -1;
580  }
581  return 0;
582 }
583 
584 /* CBC decrypt a number of blocks (input and return an IV) */
585 
586 return_type aes_cbc_decrypt(const unsigned char* in, unsigned char* out, int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1])
587 {
588  while (n_block--) {
589  uint_8t tmp[N_BLOCK];
590 
591  memcpy(tmp, in, N_BLOCK);
592  if (aes_decrypt(in, out, ctx) != EXIT_SUCCESS) {
593  return EXIT_FAILURE;
594  }
595  xor_block(out, iv);
596  memcpy(iv, tmp, N_BLOCK);
597  in += N_BLOCK;
598  out += N_BLOCK;
599  }
600  return EXIT_SUCCESS;
601 }
602 
603 #endif
604 
605 #if defined(AES_ENC_128_OTFK)
606 
607 /* The 'on the fly' encryption key update for for 128 bit keys */
608 
/* Advance a 128-bit round key in place to the next round's key:
 * RotWord + SubWord + Rcon on the first word, then ripple the XOR
 * through the remaining three words.  *rc holds the running round
 * constant and is advanced by xtime.
 */
static void update_encrypt_key_128(uint_8t k[N_BLOCK], uint_8t* rc)
{
    uint_8t cc;

    k[0] ^= s_box(k[13]) ^ *rc;
    k[1] ^= s_box(k[14]);
    k[2] ^= s_box(k[15]);
    k[3] ^= s_box(k[12]);
    *rc = f2(*rc);

    for (cc = 4; cc < 16; cc += 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }
}
626 
627 /* Encrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
628 
/* Encrypt one 16-byte block with AES-128, expanding the key schedule
 * on the fly.  key is the raw 128-bit key; o_key is working storage
 * that ends up holding the final round key (it may be the same buffer
 * as key, which is then overwritten).
 */
void aes_encrypt_128(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const unsigned char key[N_BLOCK], unsigned char o_key[N_BLOCK])
{
    uint_8t s1[N_BLOCK], r, rc = 1; /* rc: first round constant */

    if (o_key != key) {
        block_copy(o_key, key);
    }
    /* initial AddRoundKey with the raw key */
    copy_and_key(s1, in, o_key);

    /* rounds 1..9: derive each round key just before it is used */
    for (r = 1; r < 10; ++r)
#if defined(VERSION_1)
    {
        mix_sub_columns(s1);
        update_encrypt_key_128(o_key, &rc);
        add_round_key(s1, o_key);
    }
#else
    {
        uint_8t s2[N_BLOCK];
        mix_sub_columns(s2, s1);
        update_encrypt_key_128(o_key, &rc);
        copy_and_key(s1, s2, o_key);
    }
#endif

    /* final round: no MixColumns */
    shift_sub_rows(s1);
    update_encrypt_key_128(o_key, &rc);
    copy_and_key(out, s1, o_key);
}
658 
659 #endif
660 
661 #if defined(AES_DEC_128_OTFK)
662 
663 /* The 'on the fly' decryption key update for for 128 bit keys */
664 
/* Step a 128-bit round key in place back to the previous round's key:
 * exact inverse of update_encrypt_key_128 (un-ripple the word XORs,
 * then undo RotWord + SubWord + Rcon).  *rc is stepped back with the
 * inverse of xtime (d2).
 */
static void update_decrypt_key_128(uint_8t k[N_BLOCK], uint_8t* rc)
{
    uint_8t cc;

    for (cc = 12; cc > 0; cc -= 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }
    *rc = d2(*rc);
    k[0] ^= s_box(k[13]) ^ *rc;
    k[1] ^= s_box(k[14]);
    k[2] ^= s_box(k[15]);
    k[3] ^= s_box(k[12]);
}
681 
682 /* Decrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
683 
684 void aes_decrypt_128(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const unsigned char key[N_BLOCK], unsigned char o_key[N_BLOCK])
685 {
686  uint_8t s1[N_BLOCK], r, rc = 0x6c;
687  if (o_key != key) {
688  block_copy(o_key, key);
689  }
690 
691  copy_and_key(s1, in, o_key);
692  inv_shift_sub_rows(s1);
693 
694  for (r = 10; --r;)
695 #if defined(VERSION_1)
696  {
697  update_decrypt_key_128(o_key, &rc);
698  add_round_key(s1, o_key);
700  }
701 #else
702  {
703  uint_8t s2[N_BLOCK];
704  update_decrypt_key_128(o_key, &rc);
705  copy_and_key(s2, s1, o_key);
706  inv_mix_sub_columns(s1, s2);
707  }
708 #endif
709  update_decrypt_key_128(o_key, &rc);
710  copy_and_key(out, s1, o_key);
711 }
712 
713 #endif
714 
715 #if defined(AES_ENC_256_OTFK)
716 
717 /* The 'on the fly' encryption key update for for 256 bit keys */
718 
/* Advance a 256-bit key (two 16-byte halves in k) in place by one
 * key-schedule group: RotWord + SubWord + Rcon into the first half,
 * plain SubWord into the second half, rippling XORs through each.
 * *rc holds the running round constant, advanced by xtime.
 */
static void update_encrypt_key_256(uint_8t k[2 * N_BLOCK], uint_8t* rc)
{
    uint_8t cc;

    /* first half: RotWord + SubWord of the last word + Rcon */
    k[0] ^= s_box(k[29]) ^ *rc;
    k[1] ^= s_box(k[30]);
    k[2] ^= s_box(k[31]);
    k[3] ^= s_box(k[28]);
    *rc = f2(*rc);

    for (cc = 4; cc < 16; cc += 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }

    /* second half: SubWord only (the AES-256 mid-group step) */
    k[16] ^= s_box(k[12]);
    k[17] ^= s_box(k[13]);
    k[18] ^= s_box(k[14]);
    k[19] ^= s_box(k[15]);

    for (cc = 20; cc < 32; cc += 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }
}
748 
749 /* Encrypt a single block of 16 bytes with 'on the fly' 256 bit keying */
750 
/* Encrypt one 16-byte block with AES-256, expanding the 32-byte key
 * schedule on the fly in o_key (both 16-byte halves; may alias key,
 * which is then overwritten).  Odd rounds use the second half of the
 * current group; even rounds first advance the schedule.
 */
void aes_encrypt_256(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const unsigned char key[2 * N_BLOCK], unsigned char o_key[2 * N_BLOCK])
{
    uint_8t s1[N_BLOCK], r, rc = 1; /* rc: first round constant */
    if (o_key != key) {
        block_copy(o_key, key);
        block_copy(o_key + 16, key + 16);
    }
    /* initial AddRoundKey with the first half of the raw key */
    copy_and_key(s1, in, o_key);

    for (r = 1; r < 14; ++r)
#if defined(VERSION_1)
    {
        mix_sub_columns(s1);
        if (r & 1) {
            add_round_key(s1, o_key + 16);
        } else {
            update_encrypt_key_256(o_key, &rc);
            add_round_key(s1, o_key);
        }
    }
#else
    {
        uint_8t s2[N_BLOCK];
        mix_sub_columns(s2, s1);
        if (r & 1) {
            copy_and_key(s1, s2, o_key + 16);
        } else {
            update_encrypt_key_256(o_key, &rc);
            copy_and_key(s1, s2, o_key);
        }
    }
#endif

    /* final round: no MixColumns */
    shift_sub_rows(s1);
    update_encrypt_key_256(o_key, &rc);
    copy_and_key(out, s1, o_key);
}
788 
789 #endif
790 
791 #if defined(AES_DEC_256_OTFK)
792 
793 /* The 'on the fly' encryption key update for for 256 bit keys */
794 
/* Step a 256-bit key (two 16-byte halves in k) in place back by one
 * key-schedule group: exact inverse of update_encrypt_key_256
 * (un-ripple each half, undo the mid-group SubWord, then undo
 * RotWord + SubWord + Rcon).  *rc is stepped back with d2.
 */
static void update_decrypt_key_256(uint_8t k[2 * N_BLOCK], uint_8t* rc)
{
    uint_8t cc;

    for (cc = 28; cc > 16; cc -= 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }

    /* undo the mid-group SubWord into the second half */
    k[16] ^= s_box(k[12]);
    k[17] ^= s_box(k[13]);
    k[18] ^= s_box(k[14]);
    k[19] ^= s_box(k[15]);

    for (cc = 12; cc > 0; cc -= 4) {
        k[cc + 0] ^= k[cc - 4];
        k[cc + 1] ^= k[cc - 3];
        k[cc + 2] ^= k[cc - 2];
        k[cc + 3] ^= k[cc - 1];
    }

    /* undo RotWord + SubWord + Rcon on the first word */
    *rc = d2(*rc);
    k[0] ^= s_box(k[29]) ^ *rc;
    k[1] ^= s_box(k[30]);
    k[2] ^= s_box(k[31]);
    k[3] ^= s_box(k[28]);
}
824 
825 /* Decrypt a single block of 16 bytes with 'on the fly'
826  256 bit keying
827 */
828 void aes_decrypt_256(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const unsigned char key[2 * N_BLOCK], unsigned char o_key[2 * N_BLOCK])
829 {
830  uint_8t s1[N_BLOCK], r, rc = 0x80;
831 
832  if (o_key != key) {
833  block_copy(o_key, key);
834  block_copy(o_key + 16, key + 16);
835  }
836 
837  copy_and_key(s1, in, o_key);
838  inv_shift_sub_rows(s1);
839 
840  for (r = 14; --r;)
841 #if defined(VERSION_1)
842  {
843  if ((r & 1)) {
844  update_decrypt_key_256(o_key, &rc);
845  add_round_key(s1, o_key + 16);
846  } else {
847  add_round_key(s1, o_key);
848  }
850  }
851 #else
852  {
853  uint_8t s2[N_BLOCK];
854  if ((r & 1)) {
855  update_decrypt_key_256(o_key, &rc);
856  copy_and_key(s2, s1, o_key + 16);
857  } else {
858  copy_and_key(s2, s1, o_key);
859  }
860  inv_mix_sub_columns(s1, s2);
861  }
862 #endif
863  copy_and_key(out, s1, o_key);
864 }
865 
866 #endif
#define fb(x)
Definition: aes.c:92
static const uint_8t gfmul_b[256]
Definition: aes.c:120
static const uint_8t sbox[256]
Definition: aes.c:113
#define block_copy(d, s)
Definition: aes.c:220
static const uint_8t isbox[256]
Definition: aes.c:114
uint32_t uint_32t
Definition: aes.c:75
static void shift_sub_rows(uint_8t st[N_BLOCK])
Definition: aes.c:327
return_type aes_decrypt(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1])
Definition: aes.c:557
#define gfm_9(x)
Definition: aes.c:128
return_type aes_cbc_decrypt(const unsigned char *in, unsigned char *out, int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1])
Definition: aes.c:586
#define gfm_d(x)
Definition: aes.c:130
static const uint_8t gfmul_d[256]
Definition: aes.c:121
#define sb_data(w)
Definition: aes.c:98
#define fe(x)
Definition: aes.c:94
#define f1(x)
Definition: aes.c:84
return_type aes_cbc_encrypt(const unsigned char *in, unsigned char *out, int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1])
Definition: aes.c:537
#define s_box(x)
Definition: aes.c:124
static const uint_8t gfmul_e[256]
Definition: aes.c:122
uint_8t rnd
Definition: aes.h:76
#define f3(x)
Definition: aes.c:90
#define is_box(x)
Definition: aes.c:125
#define block_copy_nn(d, s, l)
Definition: aes.c:219
static void mix_sub_columns(uint_8t dt[N_BLOCK])
Definition: aes.c:386
#define f2(x)
Definition: aes.c:85
static const uint_8t gfmul_9[256]
Definition: aes.c:119
static const uint_8t gfm2_sbox[256]
Definition: aes.c:116
#define gfm_e(x)
Definition: aes.c:131
uint_8t length_type
Definition: aes.h:71
#define BPOLY
Definition: aes.c:81
static const uint_8t gfm3_sbox[256]
Definition: aes.c:117
#define VERSION_1
Definition: aes.c:68
unsigned char uint_8t
Definition: aes.h:63
#define f9(x)
Definition: aes.c:91
static void inv_shift_sub_rows(uint_8t st[N_BLOCK])
Definition: aes.c:356
static void copy_and_key(void *d, const void *s, const void *k)
Definition: aes.c:292
#define mm_data(w)
Definition: aes.c:108
static void inv_mix_sub_columns(uint_8t dt[N_BLOCK])
Definition: aes.c:416
#define gfm_b(x)
Definition: aes.c:129
uint_8t return_type
Definition: aes.h:65
#define N_BLOCK
Definition: aes.h:60
#define gfm2_sb(x)
Definition: aes.c:126
static void xor_block(void *d, const void *s)
Definition: aes.c:265
return_type aes_encrypt(const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1])
Definition: aes.c:508
return_type aes_set_key(const unsigned char key[], length_type keylen, aes_context ctx[1])
Definition: aes.c:449
#define isb_data(w)
Definition: aes.c:103
uint_8t ksch[(N_MAX_ROUNDS+1)*N_BLOCK]
Definition: aes.h:75
#define fd(x)
Definition: aes.c:93
#define d2(x)
Definition: aes.c:88
static void add_round_key(uint_8t d[N_BLOCK], const uint_8t k[N_BLOCK])
Definition: aes.c:322
#define gfm3_sb(x)
Definition: aes.c:127