/*
 * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 *
 * some optimization ideas from aes128.c by Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "config.h"
#include "aes.h"
#include "aes_internal.h"
#include "error.h"
#include "intreadwrite.h"
#include "macros.h"
#include "mem.h"

const int av_aes_size = sizeof(AVAES);

struct AVAES *av_aes_alloc(void)
{
    return av_mallocz(sizeof(struct AVAES));
}

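/* round constants for the key schedule: successive powers of x in GF(2^8) */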
static const uint8_t rcon[10] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};

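/*
 * The S-boxes and the combined SubBytes+MixColumns lookup tables are
 * generated at runtime by the first av_aes_init() call. With CONFIG_SMALL
 * only one 1 KB table per direction is kept and the other three are derived
 * by rotation in mix_core().
 */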
static uint8_t sbox[256];
static uint8_t inv_sbox[256];
#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif

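/* the lookup tables are stored in native byte order (see AV_NE below), so
 * the rotation direction has to follow the host endianness */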
#if HAVE_BIGENDIAN
#   define ROT(x, s) (((x) >> (s)) | ((x) << (32-(s))))
#else
#   define ROT(x, s) (((x) << (s)) | ((x) >> (32-(s))))
#endif

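/* AddRoundKey: XOR a 16 byte block with a round key, done as two 64 bit
 * XORs; the _s/_d variants read from / write to unaligned byte buffers */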
static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}

static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}

static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}

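/*
 * Combined (Inv)SubBytes + (Inv)ShiftRows on the block in s0[1], written to
 * s0[0]. The s parameter (0 for decryption, 2 for encryption) offsets the
 * s1/s3 row pointers so that the same byte pattern implements either shift
 * direction.
 */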
static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s);
    av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1[0].u8[ 3] = box[s1[1].u8[ 7]];
    s1[0].u8[ 7] = box[s1[1].u8[11]];
    s1[0].u8[11] = box[s1[1].u8[15]];
    s1[0].u8[15] = box[s1[1].u8[ 3]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3[0].u8[ 1] = box[s3[1].u8[13]];
    s3[0].u8[13] = box[s3[1].u8[ 9]];
    s3[0].u8[ 9] = box[s3[1].u8[ 5]];
    s3[0].u8[ 5] = box[s3[1].u8[ 1]];
}

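/*
 * One column of a table-based round: four T-table lookups XORed together
 * give SubBytes + ShiftRows + MixColumns for one output word. CONFIG_SMALL
 * uses a single table plus rotations; mix() applies this to all four
 * columns, with s1/s3 selecting the row offsets for encryption or
 * decryption.
 */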
static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}

static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;
    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}

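/* the rounds - 1 inner rounds via the lookup tables, then the final
 * (Inv)SubBytes/(Inv)ShiftRows round; the initial and final AddRoundKey
 * are done by the callers */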
static inline void aes_crypt(AVAES *a, int s, const uint8_t *sbox,
                             uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}

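/* encrypt count 16 byte blocks; with a non-NULL iv this is CBC (the previous
 * ciphertext is XORed into the plaintext before encryption), otherwise ECB */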
static void aes_encrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        if (iv)
            addkey_s(&a->state[1], iv, &a->state[1]);
        aes_crypt(a, 2, sbox, enc_multbl);
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        if (iv)
            memcpy(iv, dst, 16);
        src += 16;
        dst += 16;
    }
}

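/* decrypt count 16 byte blocks; in CBC mode the decrypted block is XORed
 * with iv and the current ciphertext becomes the next iv */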
static void aes_decrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        aes_crypt(a, 0, inv_sbox, dec_multbl);
        if (iv) {
            addkey_s(&a->state[0], iv, &a->state[0]);
            memcpy(iv, src, 16);
        }
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        src += 16;
        dst += 16;
    }
}

void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    a->crypt(a, dst, src, count, iv, a->rounds);
}
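/*
 * A minimal usage sketch of the public API (illustrative only, not compiled
 * here; key, src, dst and count are caller-provided placeholders, and iv may
 * be NULL for ECB mode):
 *
 *     struct AVAES *aes = av_aes_alloc();
 *     uint8_t iv[16] = { 0 };
 *     if (!aes)
 *         return AVERROR(ENOMEM);
 *     if (av_aes_init(aes, key, 128, 0) < 0)
 *         return AVERROR(EINVAL);
 *     av_aes_crypt(aes, dst, src, count, iv, 0); // CBC-encrypt count blocks
 *     av_free(aes);
 */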
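/*
 * Fill the (Inv)MixColumns lookup tables: tbl[0][i] is box[i] multiplied by
 * the four column coefficients c[0..3] in GF(2^8) (computed via the
 * log/antilog tables) and packed in native byte order; the remaining tables,
 * when present, are byte rotations of the first.
 */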
static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;
    uint8_t log8[256];
    uint8_t alog8[512];

    a->crypt = decrypt ? aes_decrypt : aes_encrypt;

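    /* on the first call, build the GF(2^8) exp/log tables (generator 3),
     * derive the S-box as the multiplicative inverse followed by the affine
     * transform (constant 0x63), and fill the encryption/decryption
     * lookup tables */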
    if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl) - 1][FF_ARRAY_ELEMS(enc_multbl[0]) - 1]) {
        j = 1;
        for (i = 0; i < 255; i++) {
            alog8[i] = alog8[i + 255] = j;
            log8[j]  = i;
            j       ^= j + j;
            if (j > 255)
                j  ^= 0x11B;
        }
        for (i = 0; i < 256; i++) {
            j = i ? alog8[255 - log8[i]] : 0;
            j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
            j  = (j ^ (j >> 8) ^ 99) & 255;
            inv_sbox[j] = i;
            sbox[i]     = j;
        }
        init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                     log8, alog8, inv_sbox);
        init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                     log8, alog8, sbox);
    }

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return AVERROR(EINVAL);

    a->rounds = rounds;

    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);

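    /* key expansion: each KC-word chunk is derived from the previous one by
     * rotating and substituting its last word and XORing in the round
     * constant; 256 bit keys get an extra SubWord halfway through */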
    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy(a->round_key[0].u8 + t, tk, KC * 4);
    }

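    /* for decryption, pass the intermediate round keys through the forward
     * S-box and the decryption tables; the substitutions cancel, which
     * leaves InvMixColumns applied to each of them (equivalent inverse
     * cipher). For encryption, the schedule is simply reversed to match the
     * top-down indexing in aes_crypt(). */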
    if (decrypt) {
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];
            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        for (i = 0; i < (rounds + 1) >> 1; i++)
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
    }

    return 0;
}