ctaes.c
/**********************************************************************
 * Copyright (c) 2016 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

/* Constant time, unoptimized, concise, plain C AES implementation
 * Based on:
 * Emilia Kasper and Peter Schwabe, Faster and Timing-Attack Resistant AES-GCM
 * http://www.iacr.org/archive/ches2009/57470001/57470001.pdf
 * But using 8 16-bit integers representing a single AES state rather than 8
 * 128-bit integers representing 8 AES states.
 */

#include "ctaes.h"

/* Slice variable slice_i contains the i'th bit of the 16 state variables in
 * this order:
 *  0  1  2  3
 *  4  5  6  7
 *  8  9 10 11
 * 12 13 14 15
 */

/** Convert a byte to sliced form, storing it corresponding to given row and
 *  column in s */
static void LoadByte(AES_state *s, uint8_t byte, int r, int c) {
    int i;
    for (i = 0; i < 8; i++) {
        s->slice[i] |= (byte & 1) << (r * 4 + c);
        byte >>= 1;
    }
}
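
/* For illustration, a small worked example of the layout above: the state
 * byte at row r, column c occupies bit position r * 4 + c of every slice, and
 * slice[i] receives bit i of that byte. Loading the byte 0x53 (binary
 * 01010011) at r = 1, c = 2 therefore sets bit 6 (= 1 * 4 + 2) of slice[0],
 * slice[1], slice[4] and slice[6], since those are the bit positions of 0x53
 * that are set, and leaves bit 6 of the other slices unchanged.
 */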

/** Load 16 bytes of data into 8 sliced integers */
static void LoadBytes(AES_state *s, const uint8_t *data16) {
    int c;
    for (c = 0; c < 4; c++) {
        int r;
        for (r = 0; r < 4; r++) {
            LoadByte(s, *(data16++), r, c);
        }
    }
}

/** Convert 8 sliced integers into 16 bytes of data */
static void SaveBytes(uint8_t *data16, const AES_state *s) {
    int c;
    for (c = 0; c < 4; c++) {
        int r;
        for (r = 0; r < 4; r++) {
            int b;
            uint8_t v = 0;
            for (b = 0; b < 8; b++) {
                v |= ((s->slice[b] >> (r * 4 + c)) & 1) << b;
            }
            *(data16++) = v;
        }
    }
}

/* S-box implementation based on the gate logic from:
 * Joan Boyar and Rene Peralta, A depth-16 circuit for the AES S-box.
 * https://eprint.iacr.org/2011/332.pdf
 */
static void SubBytes(AES_state *s, int inv) {
    /* Load the bit slices */
    uint16_t U0 = s->slice[7], U1 = s->slice[6], U2 = s->slice[5],
             U3 = s->slice[4];
    uint16_t U4 = s->slice[3], U5 = s->slice[2], U6 = s->slice[1],
             U7 = s->slice[0];

    uint16_t T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
             T16;
    uint16_t T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, D;
    uint16_t M1, M6, M11, M13, M15, M20, M21, M22, M23, M25, M37, M38, M39, M40;
    uint16_t M41, M42, M43, M44, M45, M46, M47, M48, M49, M50, M51, M52, M53,
             M54;
    uint16_t M55, M56, M57, M58, M59, M60, M61, M62, M63;

    if (inv) {
        uint16_t R5, R13, R17, R18, R19;
        /* Undo linear postprocessing */
        T23 = U0 ^ U3;
        T22 = ~(U1 ^ U3);
        T2 = ~(U0 ^ U1);
        T1 = U3 ^ U4;
        T24 = ~(U4 ^ U7);
        R5 = U6 ^ U7;
        T8 = ~(U1 ^ T23);
        T19 = T22 ^ R5;
        T9 = ~(U7 ^ T1);
        T10 = T2 ^ T24;
        T13 = T2 ^ R5;
        T3 = T1 ^ R5;
        T25 = ~(U2 ^ T1);
        R13 = U1 ^ U6;
        T17 = ~(U2 ^ T19);
        T20 = T24 ^ R13;
        T4 = U4 ^ T8;
        R17 = ~(U2 ^ U5);
        R18 = ~(U5 ^ U6);
        R19 = ~(U2 ^ U4);
        D = U0 ^ R17;
        T6 = T22 ^ R17;
        T16 = R13 ^ R19;
        T27 = T1 ^ R18;
        T15 = T10 ^ T27;
        T14 = T10 ^ R18;
        T26 = T3 ^ T16;
    } else {
        /* Linear preprocessing. */
        T1 = U0 ^ U3;
        T2 = U0 ^ U5;
        T3 = U0 ^ U6;
        T4 = U3 ^ U5;
        T5 = U4 ^ U6;
        T6 = T1 ^ T5;
        T7 = U1 ^ U2;
        T8 = U7 ^ T6;
        T9 = U7 ^ T7;
        T10 = T6 ^ T7;
        T11 = U1 ^ U5;
        T12 = U2 ^ U5;
        T13 = T3 ^ T4;
        T14 = T6 ^ T11;
        T15 = T5 ^ T11;
        T16 = T5 ^ T12;
        T17 = T9 ^ T16;
        T18 = U3 ^ U7;
        T19 = T7 ^ T18;
        T20 = T1 ^ T19;
        T21 = U6 ^ U7;
        T22 = T7 ^ T21;
        T23 = T2 ^ T22;
        T24 = T2 ^ T10;
        T25 = T20 ^ T17;
        T26 = T3 ^ T16;
        T27 = T1 ^ T12;
        D = U7;
    }

    /* Non-linear transformation (shared between the forward and backward case)
     */
    M1 = T13 & T6;
    M6 = T3 & T16;
    M11 = T1 & T15;
    M13 = (T4 & T27) ^ M11;
    M15 = (T2 & T10) ^ M11;
    M20 = T14 ^ M1 ^ (T23 & T8) ^ M13;
    M21 = (T19 & D) ^ M1 ^ T24 ^ M15;
    M22 = T26 ^ M6 ^ (T22 & T9) ^ M13;
    M23 = (T20 & T17) ^ M6 ^ M15 ^ T25;
    M25 = M22 & M20;
    M37 = M21 ^ ((M20 ^ M21) & (M23 ^ M25));
    M38 = M20 ^ M25 ^ (M21 | (M20 & M23));
    M39 = M23 ^ ((M22 ^ M23) & (M21 ^ M25));
    M40 = M22 ^ M25 ^ (M23 | (M21 & M22));
    M41 = M38 ^ M40;
    M42 = M37 ^ M39;
    M43 = M37 ^ M38;
    M44 = M39 ^ M40;
    M45 = M42 ^ M41;
    M46 = M44 & T6;
    M47 = M40 & T8;
    M48 = M39 & D;
    M49 = M43 & T16;
    M50 = M38 & T9;
    M51 = M37 & T17;
    M52 = M42 & T15;
    M53 = M45 & T27;
    M54 = M41 & T10;
    M55 = M44 & T13;
    M56 = M40 & T23;
    M57 = M39 & T19;
    M58 = M43 & T3;
    M59 = M38 & T22;
    M60 = M37 & T20;
    M61 = M42 & T1;
    M62 = M45 & T4;
    M63 = M41 & T2;

    if (inv) {
        /* Undo linear preprocessing */
        uint16_t P0 = M52 ^ M61;
        uint16_t P1 = M58 ^ M59;
        uint16_t P2 = M54 ^ M62;
        uint16_t P3 = M47 ^ M50;
        uint16_t P4 = M48 ^ M56;
        uint16_t P5 = M46 ^ M51;
        uint16_t P6 = M49 ^ M60;
        uint16_t P7 = P0 ^ P1;
        uint16_t P8 = M50 ^ M53;
        uint16_t P9 = M55 ^ M63;
        uint16_t P10 = M57 ^ P4;
        uint16_t P11 = P0 ^ P3;
        uint16_t P12 = M46 ^ M48;
        uint16_t P13 = M49 ^ M51;
        uint16_t P14 = M49 ^ M62;
        uint16_t P15 = M54 ^ M59;
        uint16_t P16 = M57 ^ M61;
        uint16_t P17 = M58 ^ P2;
        uint16_t P18 = M63 ^ P5;
        uint16_t P19 = P2 ^ P3;
        uint16_t P20 = P4 ^ P6;
        uint16_t P22 = P2 ^ P7;
        uint16_t P23 = P7 ^ P8;
        uint16_t P24 = P5 ^ P7;
        uint16_t P25 = P6 ^ P10;
        uint16_t P26 = P9 ^ P11;
        uint16_t P27 = P10 ^ P18;
        uint16_t P28 = P11 ^ P25;
        uint16_t P29 = P15 ^ P20;
        s->slice[7] = P13 ^ P22;
        s->slice[6] = P26 ^ P29;
        s->slice[5] = P17 ^ P28;
        s->slice[4] = P12 ^ P22;
        s->slice[3] = P23 ^ P27;
        s->slice[2] = P19 ^ P24;
        s->slice[1] = P14 ^ P23;
        s->slice[0] = P9 ^ P16;
    } else {
        /* Linear postprocessing */
        uint16_t L0 = M61 ^ M62;
        uint16_t L1 = M50 ^ M56;
        uint16_t L2 = M46 ^ M48;
        uint16_t L3 = M47 ^ M55;
        uint16_t L4 = M54 ^ M58;
        uint16_t L5 = M49 ^ M61;
        uint16_t L6 = M62 ^ L5;
        uint16_t L7 = M46 ^ L3;
        uint16_t L8 = M51 ^ M59;
        uint16_t L9 = M52 ^ M53;
        uint16_t L10 = M53 ^ L4;
        uint16_t L11 = M60 ^ L2;
        uint16_t L12 = M48 ^ M51;
        uint16_t L13 = M50 ^ L0;
        uint16_t L14 = M52 ^ M61;
        uint16_t L15 = M55 ^ L1;
        uint16_t L16 = M56 ^ L0;
        uint16_t L17 = M57 ^ L1;
        uint16_t L18 = M58 ^ L8;
        uint16_t L19 = M63 ^ L4;
        uint16_t L20 = L0 ^ L1;
        uint16_t L21 = L1 ^ L7;
        uint16_t L22 = L3 ^ L12;
        uint16_t L23 = L18 ^ L2;
        uint16_t L24 = L15 ^ L9;
        uint16_t L25 = L6 ^ L10;
        uint16_t L26 = L7 ^ L9;
        uint16_t L27 = L8 ^ L10;
        uint16_t L28 = L11 ^ L14;
        uint16_t L29 = L11 ^ L17;
        s->slice[7] = L6 ^ L24;
        s->slice[6] = ~(L16 ^ L26);
        s->slice[5] = ~(L19 ^ L28);
        s->slice[4] = L6 ^ L21;
        s->slice[3] = L20 ^ L22;
        s->slice[2] = L25 ^ L29;
        s->slice[1] = ~(L13 ^ L27);
        s->slice[0] = ~(L6 ^ L23);
    }
}
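
/* A note on the conventions in SubBytes above: U0 is the most significant bit
 * of each state byte (slice[7]) and U7 the least significant (slice[0]). The
 * complemented outputs in the forward direction (slice[6], slice[5], slice[1],
 * slice[0]) correspond exactly to the bits set in the S-box affine constant
 * 0x63. As a sanity check against the standard AES S-box tables, the forward
 * direction maps 0x00 to 0x63 and 0x53 to 0xed, and the inverse direction
 * (inv = 1) maps them back.
 */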

#define BIT_RANGE(from, to) (((1 << ((to) - (from))) - 1) << (from))

#define BIT_RANGE_LEFT(x, from, to, shift)                                     \
    (((x) & BIT_RANGE((from), (to))) << (shift))
#define BIT_RANGE_RIGHT(x, from, to, shift)                                    \
    (((x) & BIT_RANGE((from), (to))) >> (shift))
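
/* For example, BIT_RANGE(4, 8) evaluates to 0x00f0, the mask covering bit
 * positions 4 through 7, i.e. row 1 of the state in the slice layout above.
 * BIT_RANGE_LEFT and BIT_RANGE_RIGHT select such a range and shift it into its
 * new position.
 */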

static void ShiftRows(AES_state *s) {
    int i;
    for (i = 0; i < 8; i++) {
        uint16_t v = s->slice[i];
        s->slice[i] =
            (v & BIT_RANGE(0, 4)) | BIT_RANGE_LEFT(v, 4, 5, 3) |
            BIT_RANGE_RIGHT(v, 5, 8, 1) | BIT_RANGE_LEFT(v, 8, 10, 2) |
            BIT_RANGE_RIGHT(v, 10, 12, 2) | BIT_RANGE_LEFT(v, 12, 15, 1) |
            BIT_RANGE_RIGHT(v, 15, 16, 3);
    }
}
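
/* To see how this implements ShiftRows, consider row 1, which occupies bit
 * positions 4..7 (columns 0..3): BIT_RANGE_LEFT(v, 4, 5, 3) moves the bit for
 * (row 1, column 0) to the (row 1, column 3) position, and
 * BIT_RANGE_RIGHT(v, 5, 8, 1) moves columns 1..3 to columns 0..2, so row 1 is
 * rotated left by one column. Rows 2 and 3 are rotated left by two and three
 * columns in the same way, and row 0 (bits 0..3) is left unchanged.
 */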

static void InvShiftRows(AES_state *s) {
    int i;
    for (i = 0; i < 8; i++) {
        uint16_t v = s->slice[i];
        s->slice[i] =
            (v & BIT_RANGE(0, 4)) | BIT_RANGE_LEFT(v, 4, 7, 1) |
            BIT_RANGE_RIGHT(v, 7, 8, 3) | BIT_RANGE_LEFT(v, 8, 10, 2) |
            BIT_RANGE_RIGHT(v, 10, 12, 2) | BIT_RANGE_LEFT(v, 12, 13, 3) |
            BIT_RANGE_RIGHT(v, 13, 16, 1);
    }
}

#define ROT(x, b) (((x) >> ((b) * 4)) | ((x) << ((4 - (b)) * 4)))
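
/* When the result is stored back into a 16-bit slice, ROT(x, b) rotates x by b
 * nibbles; for example ROT(0x1234, 1) stored in a uint16_t gives 0x4123. Since
 * the byte at row r, column c occupies bit r * 4 + c, ROT(v, 1) replaces each
 * row of a slice by the one below it (with the top row wrapping around to the
 * bottom), which is how the column-wise multiplications by powers of x in
 * MixColumns below are realized.
 */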

static void MixColumns(AES_state *s, int inv) {
    /* The MixColumns transform treats the bytes of the columns of the state as
     * coefficients of a 3rd degree polynomial over GF(2^8) and multiplies them
     * by the fixed polynomial a(x) = {03}x^3 + {01}x^2 + {01}x + {02}, modulo
     * x^4 + {01}.
     *
     * In the inverse transform, we multiply by the inverse of a(x),
     * a^-1(x) = {0b}x^3 + {0d}x^2 + {09}x + {0e}. This is equal to
     * a(x) * ({04}x^2 + {05}), so we can reuse the forward transform's code
     * (found in OpenSSL's bsaes-x86_64.pl, attributed to Jussi Kivilinna)
     *
     * In the bitsliced representation, a multiplication of every column by x
     * mod x^4 + 1 is simply a right rotation.
     */
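
    /* As a quick check of that identity, multiplying out a(x) * ({04}x^2 +
     * {05}) over GF(2^8), modulo x^4 + {01}:
     *   {04}x^2 * a(x) = {0c}x^5 + {04}x^4 + {04}x^3 + {08}x^2
     *                  = {04}x^3 + {08}x^2 + {0c}x + {04}    (using x^4 = {01})
     *   {05}    * a(x) = {0f}x^3 + {05}x^2 + {05}x + {0a}
     * and adding the two gives {0b}x^3 + {0d}x^2 + {09}x + {0e} = a^-1(x).
     */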

    /* Shared for both directions is a multiplication by a(x), which can be
     * rewritten as (x^3 + x^2 + x) + {02}*(x^3 + {01}).
     *
     * First compute s into the s? variables, (x^3 + {01}) * s into the s?_01
     * variables and (x^3 + x^2 + x)*s into the s?_123 variables.
     */
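
    /* (Checking that decomposition: {02} * (x^3 + {01}) = {02}x^3 + {02}, and
     * adding x^3 + x^2 + x gives {03}x^3 + {01}x^2 + {01}x + {02} = a(x).) */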
    uint16_t s0 = s->slice[0], s1 = s->slice[1], s2 = s->slice[2],
             s3 = s->slice[3];
    uint16_t s4 = s->slice[4], s5 = s->slice[5], s6 = s->slice[6],
             s7 = s->slice[7];
    uint16_t s0_01 = s0 ^ ROT(s0, 1), s0_123 = ROT(s0_01, 1) ^ ROT(s0, 3);
    uint16_t s1_01 = s1 ^ ROT(s1, 1), s1_123 = ROT(s1_01, 1) ^ ROT(s1, 3);
    uint16_t s2_01 = s2 ^ ROT(s2, 1), s2_123 = ROT(s2_01, 1) ^ ROT(s2, 3);
    uint16_t s3_01 = s3 ^ ROT(s3, 1), s3_123 = ROT(s3_01, 1) ^ ROT(s3, 3);
    uint16_t s4_01 = s4 ^ ROT(s4, 1), s4_123 = ROT(s4_01, 1) ^ ROT(s4, 3);
    uint16_t s5_01 = s5 ^ ROT(s5, 1), s5_123 = ROT(s5_01, 1) ^ ROT(s5, 3);
    uint16_t s6_01 = s6 ^ ROT(s6, 1), s6_123 = ROT(s6_01, 1) ^ ROT(s6, 3);
    uint16_t s7_01 = s7 ^ ROT(s7, 1), s7_123 = ROT(s7_01, 1) ^ ROT(s7, 3);
    /* Now compute s = s?_123 + {02} * s?_01. */
    s->slice[0] = s7_01 ^ s0_123;
    s->slice[1] = s7_01 ^ s0_01 ^ s1_123;
    s->slice[2] = s1_01 ^ s2_123;
    s->slice[3] = s7_01 ^ s2_01 ^ s3_123;
    s->slice[4] = s7_01 ^ s3_01 ^ s4_123;
    s->slice[5] = s4_01 ^ s5_123;
    s->slice[6] = s5_01 ^ s6_123;
    s->slice[7] = s6_01 ^ s7_123;
    if (inv) {
        /* In the reverse direction, we further need to multiply by
         * {04}x^2 + {05}, which can be written as {04} * (x^2 + {01}) + {01}.
         *
         * First compute (x^2 + {01}) * s into the t?_02 variables: */
        uint16_t t0_02 = s->slice[0] ^ ROT(s->slice[0], 2);
        uint16_t t1_02 = s->slice[1] ^ ROT(s->slice[1], 2);
        uint16_t t2_02 = s->slice[2] ^ ROT(s->slice[2], 2);
        uint16_t t3_02 = s->slice[3] ^ ROT(s->slice[3], 2);
        uint16_t t4_02 = s->slice[4] ^ ROT(s->slice[4], 2);
        uint16_t t5_02 = s->slice[5] ^ ROT(s->slice[5], 2);
        uint16_t t6_02 = s->slice[6] ^ ROT(s->slice[6], 2);
        uint16_t t7_02 = s->slice[7] ^ ROT(s->slice[7], 2);
        /* And then update s += {04} * t?_02 */
        s->slice[0] ^= t6_02;
        s->slice[1] ^= t6_02 ^ t7_02;
        s->slice[2] ^= t0_02 ^ t7_02;
        s->slice[3] ^= t1_02 ^ t6_02;
        s->slice[4] ^= t2_02 ^ t6_02 ^ t7_02;
        s->slice[5] ^= t3_02 ^ t7_02;
        s->slice[6] ^= t4_02;
        s->slice[7] ^= t5_02;
    }
}

static void AddRoundKey(AES_state *s, const AES_state *round) {
    int b;
    for (b = 0; b < 8; b++) {
        s->slice[b] ^= round->slice[b];
    }
}

/** column_0(s) = column_c(a) */
static void GetOneColumn(AES_state *s, const AES_state *a, int c) {
    int b;
    for (b = 0; b < 8; b++) {
        s->slice[b] = (a->slice[b] >> c) & 0x1111;
    }
}
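
/* In the slice layout, column c of the state occupies bit positions c, c + 4,
 * c + 8 and c + 12, so (a->slice[b] >> c) & 0x1111 gathers that column into
 * the column-0 positions of the result. For example, with c = 3 the expression
 * selects bits 3, 7, 11 and 15 of a and stores them at bits 0, 4, 8 and 12 of
 * s.
 */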

/** column_c1(r) |= (column_0(s) ^= column_c2(a)) */
static void KeySetupColumnMix(AES_state *s, AES_state *r, const AES_state *a,
                              int c1, int c2) {
    int b;
    for (b = 0; b < 8; b++) {
        r->slice[b] |=
            ((s->slice[b] ^= ((a->slice[b] >> c2) & 0x1111)) & 0x1111) << c1;
    }
}

/** Rotate the rows in s one position upwards, and xor in r */
static void KeySetupTransform(AES_state *s, const AES_state *r) {
    int b;
    for (b = 0; b < 8; b++) {
        s->slice[b] = ((s->slice[b] >> 4) | (s->slice[b] << 12)) ^ r->slice[b];
    }
}

/* Multiply the cells in s by x, as polynomials over GF(2) mod x^8 + x^4 + x^3 +
 * x + 1 */
static void MultX(AES_state *s) {
    uint16_t top = s->slice[7];
    s->slice[7] = s->slice[6];
    s->slice[6] = s->slice[5];
    s->slice[5] = s->slice[4];
    s->slice[4] = s->slice[3] ^ top;
    s->slice[3] = s->slice[2] ^ top;
    s->slice[2] = s->slice[1];
    s->slice[1] = s->slice[0] ^ top;
    s->slice[0] = top;
}
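
/* MultX is used below to step the round constant: each cell is doubled in
 * GF(2^8), with x^8 reduced to x^4 + x^3 + x + 1 whenever the top bit was set.
 * For example, a cell holding {80} becomes {1b}, and one holding {53} becomes
 * {a6}.
 */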

/** Expand the cipher key into the key schedule. */
static void AES_setup(AES_state *rounds, const uint8_t *key, int nkeywords,
                      int nrounds) {
    int i;

    /* The one-byte round constant */
    AES_state rcon = {{1, 0, 0, 0, 0, 0, 0, 0}};
    /* The number of the word being generated, modulo nkeywords */
    int pos = 0;
    /* The column representing the word currently being processed */
    AES_state column;

    for (i = 0; i < nrounds + 1; i++) {
        int b;
        for (b = 0; b < 8; b++) {
            rounds[i].slice[b] = 0;
        }
    }

    /* The first nkeywords round columns are just taken from the key directly.
     */
    for (i = 0; i < nkeywords; i++) {
        int r;
        for (r = 0; r < 4; r++) {
            LoadByte(&rounds[i >> 2], *(key++), r, i & 3);
        }
    }

    GetOneColumn(&column, &rounds[(nkeywords - 1) >> 2], (nkeywords - 1) & 3);

    for (i = nkeywords; i < 4 * (nrounds + 1); i++) {
        /* Transform column */
        if (pos == 0) {
            SubBytes(&column, 0);
            KeySetupTransform(&column, &rcon);
            MultX(&rcon);
        } else if (nkeywords > 6 && pos == 4) {
            SubBytes(&column, 0);
        }
        if (++pos == nkeywords) pos = 0;
        KeySetupColumnMix(&column, &rounds[i >> 2],
                          &rounds[(i - nkeywords) >> 2], i & 3,
                          (i - nkeywords) & 3);
    }
}
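
/* For reference, the key schedule sizes used by the wrappers below are:
 * AES-128: nkeywords = 4, nrounds = 10, producing 44 words (11 round keys);
 * AES-192: nkeywords = 6, nrounds = 12, producing 52 words (13 round keys);
 * AES-256: nkeywords = 8, nrounds = 14, producing 60 words (15 round keys).
 * Each AES_state in rounds[] holds one 4-word round key in sliced form.
 */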

static void AES_encrypt(const AES_state *rounds, int nrounds, uint8_t *cipher16,
                        const uint8_t *plain16) {
    AES_state s = {{0}};
    int round;

    LoadBytes(&s, plain16);
    AddRoundKey(&s, rounds++);

    for (round = 1; round < nrounds; round++) {
        SubBytes(&s, 0);
        ShiftRows(&s);
        MixColumns(&s, 0);
        AddRoundKey(&s, rounds++);
    }

    SubBytes(&s, 0);
    ShiftRows(&s);
    AddRoundKey(&s, rounds);

    SaveBytes(cipher16, &s);
}

static void AES_decrypt(const AES_state *rounds, int nrounds, uint8_t *plain16,
                        const uint8_t *cipher16) {
    /* Most AES decryption implementations use the alternate scheme
     * (the Equivalent Inverse Cipher), which allows for more code reuse between
     * the encryption and decryption code, but requires separate setup for both.
     * This implementation instead applies the inverse round operations in
     * reverse order, reusing the encryption key schedule.
     */
    AES_state s = {{0}};
    int round;

    rounds += nrounds;

    LoadBytes(&s, cipher16);
    AddRoundKey(&s, rounds--);

    for (round = 1; round < nrounds; round++) {
        InvShiftRows(&s);
        SubBytes(&s, 1);
        AddRoundKey(&s, rounds--);
        MixColumns(&s, 1);
    }

    InvShiftRows(&s);
    SubBytes(&s, 1);
    AddRoundKey(&s, rounds);

    SaveBytes(plain16, &s);
}

void AES128_init(AES128_ctx *ctx, const uint8_t *key16) {
    AES_setup(ctx->rk, key16, 4, 10);
}

void AES128_encrypt(const AES128_ctx *ctx, size_t blocks, uint8_t *cipher16,
                    const uint8_t *plain16) {
    while (blocks--) {
        AES_encrypt(ctx->rk, 10, cipher16, plain16);
        cipher16 += 16;
        plain16 += 16;
    }
}

void AES128_decrypt(const AES128_ctx *ctx, size_t blocks, uint8_t *plain16,
                    const uint8_t *cipher16) {
    while (blocks--) {
        AES_decrypt(ctx->rk, 10, plain16, cipher16);
        cipher16 += 16;
        plain16 += 16;
    }
}

void AES192_init(AES192_ctx *ctx, const uint8_t *key24) {
    AES_setup(ctx->rk, key24, 6, 12);
}

void AES192_encrypt(const AES192_ctx *ctx, size_t blocks, uint8_t *cipher16,
                    const uint8_t *plain16) {
    while (blocks--) {
        AES_encrypt(ctx->rk, 12, cipher16, plain16);
        cipher16 += 16;
        plain16 += 16;
    }
}

void AES192_decrypt(const AES192_ctx *ctx, size_t blocks, uint8_t *plain16,
                    const uint8_t *cipher16) {
    while (blocks--) {
        AES_decrypt(ctx->rk, 12, plain16, cipher16);
        cipher16 += 16;
        plain16 += 16;
    }
}

void AES256_init(AES256_ctx *ctx, const uint8_t *key32) {
    AES_setup(ctx->rk, key32, 8, 14);
}

void AES256_encrypt(const AES256_ctx *ctx, size_t blocks, uint8_t *cipher16,
                    const uint8_t *plain16) {
    while (blocks--) {
        AES_encrypt(ctx->rk, 14, cipher16, plain16);
        cipher16 += 16;
        plain16 += 16;
    }
}

void AES256_decrypt(const AES256_ctx *ctx, size_t blocks, uint8_t *plain16,
                    const uint8_t *cipher16) {
    while (blocks--) {
        AES_decrypt(ctx->rk, 14, plain16, cipher16);
        cipher16 += 16;
        plain16 += 16;
    }
}
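
/* A minimal usage sketch for the API above. The caller code here (including
 * the example() function name and the all-zero key and plaintext) is
 * hypothetical and shown only as an example; it is not part of this
 * translation unit:
 *
 *     #include "ctaes.h"
 *
 *     void example(void) {
 *         AES128_ctx ctx;
 *         uint8_t key[16] = {0};    // placeholder 128-bit key
 *         uint8_t plain[16] = {0};  // one 16-byte block
 *         uint8_t cipher[16];
 *         AES128_init(&ctx, key);
 *         AES128_encrypt(&ctx, 1, cipher, plain); // 1 block: plain -> cipher
 *         AES128_decrypt(&ctx, 1, plain, cipher); // round-trips back to plain
 *     }
 *
 * Note that these functions expose the raw block cipher on whole 16-byte
 * blocks (each block is processed independently); any chaining mode and
 * padding must be handled by the caller.
 */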