/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <linux/module.h>

#include "aes-ctr-fallback.h"

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], u8 final[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

/* borrowed from aes-neon-blk.ko */
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks, int first);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks, u8 iv[],
				     int first);

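/*
 * The rk[] sizing below presumably accounts for up to 13 round keys in
 * the 8-block bit-sliced format (8 * AES_BLOCK_SIZE bytes each, for the
 * interior rounds of AES-256), plus the first and last round keys in
 * ordinary form (2 * 16 = 32 bytes).
 */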
struct aesbs_ctx {
	u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
	int rounds;
} __aligned(AES_BLOCK_SIZE);

struct aesbs_cbc_ctx {
	struct aesbs_ctx key;
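	/* unsliced round keys, for the plain NEON CBC encrypt path */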
	u32 enc[AES_MAX_KEYLENGTH_U32];
};

struct aesbs_ctr_ctx {
	struct aesbs_ctx key;		/* must be first member */
	struct crypto_aes_ctx fallback;
};

struct aesbs_xts_ctx {
	struct aesbs_ctx key;
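	/* round keys for the tweak (i.e., the second half of the XTS key) */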
	u32 twkey[AES_MAX_KEYLENGTH_U32];
};

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			unsigned int key_len)
{
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = crypto_aes_expand_key(&rk, in_key, key_len);
	if (err)
		return err;

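	/* 10, 12 or 14 rounds for 128-, 192- or 256-bit keys, respectively */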
	ctx->rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
	kernel_neon_end();

	return 0;
}

static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
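	/*
	 * Except on the final chunk of the walk, only process whole
	 * multiples of the walk stride (8 blocks), so that the bit-sliced
	 * code always runs fully interleaved.
	 */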
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
		   ctx->rounds, blocks);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = crypto_aes_expand_key(&rk, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
	kernel_neon_end();

	return 0;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, first = 1;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		/*
		 * CBC encryption is inherently sequential, as each block
		 * depends on the previous ciphertext block, so 8-way
		 * bit-slicing does not help here; fall back to the
		 * non-bitsliced NEON implementation instead.
		 */
		neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				     ctx->enc, ctx->key.rounds, blocks, walk.iv,
				     first);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
		first = 0;
	}
	kernel_neon_end();
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
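	/*
	 * Unlike CBC encryption, CBC decryption parallelizes across
	 * blocks, so the bit-sliced code can be used directly here.
	 */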
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->key.rk, ctx->key.rounds, blocks,
				  walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
				 unsigned int key_len)
{
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = crypto_aes_expand_key(&ctx->fallback, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
	kernel_neon_end();

	return 0;
}

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
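	/*
	 * When the request size is not a multiple of the block size, the
	 * asm routine is passed 'buf' as 'final', where it presumably
	 * leaves the keystream for the last partial block; the tail is
	 * then XORed by hand via crypto_xor_cpy() below.
	 */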
	while (walk.nbytes > 0) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

		if (walk.nbytes < walk.total) {
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);
			final = NULL;
		}

		aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->rk, ctx->rounds, blocks, walk.iv, final);

		if (final) {
			u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
			u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

			crypto_xor_cpy(dst, src, final,
				       walk.total % AES_BLOCK_SIZE);

			err = skcipher_walk_done(&walk, 0);
			break;
		}
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = xts_verify_key(tfm, in_key, key_len);
	if (err)
		return err;

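	/*
	 * An XTS key is two AES keys concatenated: expand the second half
	 * as the tweak key here, and hand the first half to aesbs_setkey()
	 * below for the data encryption key.
	 */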
	key_len /= 2;
	err = crypto_aes_expand_key(&rk, in_key + key_len, key_len);
	if (err)
		return err;

	memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));

	return aesbs_setkey(tfm, in_key, key_len);
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);

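	/*
	 * This synchronous variant may be called from contexts where the
	 * NEON unit cannot be used, in which case a scalar fallback
	 * performs the CTR encryption instead.
	 */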
	if (!may_use_simd())
		return aes_ctr_encrypt_fallback(&ctx->fallback, req);

	return ctr_encrypt(req);
}

static int __xts_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	kernel_neon_begin();

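	/*
	 * Encrypt the IV with the tweak key to produce the initial tweak;
	 * the asm routines presumably derive the per-block tweaks from it.
	 */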
	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey,
			     ctx->key.rounds, 1, 1);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
		   ctx->key.rounds, blocks, walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_decrypt);
}

static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "__ecb(aes)",
	.base.cra_driver_name	= "__ecb-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
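	/* the bit-sliced NEON code interleaves 8 blocks per invocation */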
	.walksize		= 8 * AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "__cbc(aes)",
	.base.cra_driver_name	= "__cbc-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "__ctr(aes)",
	.base.cra_driver_name	= "__ctr-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
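	/*
	 * Synchronous ctr(aes) variant with a scalar fallback, registered
	 * just below the priority of the simd-wrapped algorithms so that
	 * it is only chosen when a synchronous cipher is required.
	 */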
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-neonbs",
	.base.cra_priority	= 250 - 1,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctr_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_ctr_setkey_sync,
	.encrypt		= ctr_encrypt_sync,
	.decrypt		= ctr_encrypt_sync,
}, {
	.base.cra_name		= "__xts(aes)",
	.base.cra_driver_name	= "__xts-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_xts_setkey,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
		if (aes_simd_algs[i])
			simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!(elf_hwcap & HWCAP_ASIMD))
		return -ENODEV;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

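		/*
		 * Wrap each internal "__xxx(aes)" algorithm in a simd
		 * helper; skipping the "__" prefix (+ 2) yields the names
		 * under which the wrapper is registered.
		 */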
		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}
	return 0;

unregister_simds:
	aes_exit();
	return err;
}

module_init(aes_init);
module_exit(aes_exit);