1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in intel-aes_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21 
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/fpu/api.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45 
46 
47 #define AESNI_ALIGN	16
48 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
49 #define RFC4106_HASH_SUBKEY_SIZE 16
50 
51 /* This data is stored at the end of the crypto_tfm struct.
52  * It is a per-"session" data storage location.
53  * This needs to be 16 byte aligned.
54  */
55 struct aesni_rfc4106_gcm_ctx {
56 	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
57 	struct crypto_aes_ctx aes_key_expanded
58 		__attribute__ ((__aligned__(AESNI_ALIGN)));
59 	u8 nonce[4];
60 };
61 
62 struct aesni_gcm_set_hash_subkey_result {
63 	int err;
64 	struct completion completion;
65 };
66 
67 struct aesni_hash_subkey_req_data {
68 	u8 iv[16];
69 	struct aesni_gcm_set_hash_subkey_result result;
70 	struct scatterlist sg;
71 };
72 
73 struct aesni_lrw_ctx {
74 	struct lrw_table_ctx lrw_table;
75 	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
76 };
77 
78 struct aesni_xts_ctx {
79 	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80 	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
81 };
82 
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 			     unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86 			  const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88 			  const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90 			      const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92 			      const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94 			      const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96 			      const u8 *in, unsigned int len, u8 *iv);
97 
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100 
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103 
104 #ifdef CONFIG_X86_64
105 
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107 			      const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109 			      const u8 *in, unsigned int len, u8 *iv);
110 
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112 				 const u8 *in, bool enc, u8 *iv);
113 
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131 			const u8 *in, unsigned long plaintext_len, u8 *iv,
132 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133 			u8 *auth_tag, unsigned long auth_tag_len);
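/*
 * Minimal usage sketch (illustrative only; the local names aes_ctx, dst, src,
 * plen, iv, hash_subkey and aad are hypothetical caller-provided buffers set
 * up as described above, with the 16-byte tag appended after the ciphertext):
 *
 *	aesni_gcm_enc(aes_ctx, dst, src, plen, iv,
 *		      hash_subkey, aad, 8, dst + plen, 16);
 */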
134 
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
153 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154 			u8 *auth_tag, unsigned long auth_tag_len);
155 
156 
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159 		void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161 		void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163 		void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170 
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172 			const u8 *in, unsigned long plaintext_len, u8 *iv,
173 			const u8 *aad, unsigned long aad_len,
174 			u8 *auth_tag, unsigned long auth_tag_len);
175 
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
178 			const u8 *aad, unsigned long aad_len,
179 			u8 *auth_tag, unsigned long auth_tag_len);
180 
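/*
 * Dispatch helper: requests shorter than AVX_GEN2_OPTSIZE bytes, or with a
 * key other than 128 bits, fall back to the plain AES-NI implementation;
 * larger 128-bit-key requests precompute the GHASH tables and use the AVX
 * (gen2) code path.
 */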
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182 			const u8 *in, unsigned long plaintext_len, u8 *iv,
183 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184 			u8 *auth_tag, unsigned long auth_tag_len)
185 {
186 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
187 	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
188 		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189 				aad_len, auth_tag, auth_tag_len);
190 	} else {
191 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192 		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193 					aad_len, auth_tag, auth_tag_len);
194 	}
195 }
196 
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
199 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200 			u8 *auth_tag, unsigned long auth_tag_len)
201 {
202 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
203 	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
204 		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205 				aad_len, auth_tag, auth_tag_len);
206 	} else {
207 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208 		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209 					aad_len, auth_tag, auth_tag_len);
210 	}
211 }
212 #endif
213 
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221 
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223 			const u8 *in, unsigned long plaintext_len, u8 *iv,
224 			const u8 *aad, unsigned long aad_len,
225 			u8 *auth_tag, unsigned long auth_tag_len);
226 
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
229 			const u8 *aad, unsigned long aad_len,
230 			u8 *auth_tag, unsigned long auth_tag_len);
231 
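/*
 * As above, but with a second threshold: 128-bit-key requests of at least
 * AVX_GEN4_OPTSIZE bytes use the AVX2 (gen4) code path, mid-sized requests
 * use AVX (gen2), and everything else falls back to the plain implementation.
 */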
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233 			const u8 *in, unsigned long plaintext_len, u8 *iv,
234 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235 			u8 *auth_tag, unsigned long auth_tag_len)
236 {
237 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
238 	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
239 		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240 				aad_len, auth_tag, auth_tag_len);
241 	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243 		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244 					aad_len, auth_tag, auth_tag_len);
245 	} else {
246 		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247 		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248 					aad_len, auth_tag, auth_tag_len);
249 	}
250 }
251 
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
254 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255 			u8 *auth_tag, unsigned long auth_tag_len)
256 {
257 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
258 	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
259 		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260 				aad, aad_len, auth_tag, auth_tag_len);
261 	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263 		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264 					aad_len, auth_tag, auth_tag_len);
265 	} else {
266 		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267 		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268 					aad_len, auth_tag, auth_tag_len);
269 	}
270 }
271 #endif
272 
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274 			const u8 *in, unsigned long plaintext_len, u8 *iv,
275 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276 			u8 *auth_tag, unsigned long auth_tag_len);
277 
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
280 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281 			u8 *auth_tag, unsigned long auth_tag_len);
282 
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286 	unsigned long align = AESNI_ALIGN;
287 
288 	if (align <= crypto_tfm_ctx_alignment())
289 		align = 1;
290 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
291 }
292 #endif
293 
294 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
295 {
296 	unsigned long addr = (unsigned long)raw_ctx;
297 	unsigned long align = AESNI_ALIGN;
298 
299 	if (align <= crypto_tfm_ctx_alignment())
300 		align = 1;
301 	return (struct crypto_aes_ctx *)ALIGN(addr, align);
302 }
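/*
 * Worked example of the alignment fix-up above: with AESNI_ALIGN == 16 and a
 * raw_ctx pointer of, say, 0x...1008, ALIGN() rounds it up to 0x...1010, which
 * is why the raw context buffers reserve AESNI_ALIGN - 1 extra bytes.
 */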
303 
304 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
305 			      const u8 *in_key, unsigned int key_len)
306 {
307 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
308 	u32 *flags = &tfm->crt_flags;
309 	int err;
310 
311 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
312 	    key_len != AES_KEYSIZE_256) {
313 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
314 		return -EINVAL;
315 	}
316 
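	/*
	 * aesni_set_key() uses XMM registers and must run between
	 * kernel_fpu_begin()/kernel_fpu_end(); when the FPU is not usable in
	 * this context, fall back to the generic C key expansion instead.
	 */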
317 	if (!irq_fpu_usable())
318 		err = crypto_aes_expand_key(ctx, in_key, key_len);
319 	else {
320 		kernel_fpu_begin();
321 		err = aesni_set_key(ctx, in_key, key_len);
322 		kernel_fpu_end();
323 	}
324 
325 	return err;
326 }
327 
328 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
329 		       unsigned int key_len)
330 {
331 	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
332 }
333 
334 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
335 {
336 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
337 
338 	if (!irq_fpu_usable())
339 		crypto_aes_encrypt_x86(ctx, dst, src);
340 	else {
341 		kernel_fpu_begin();
342 		aesni_enc(ctx, dst, src);
343 		kernel_fpu_end();
344 	}
345 }
346 
347 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
348 {
349 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
350 
351 	if (!irq_fpu_usable())
352 		crypto_aes_decrypt_x86(ctx, dst, src);
353 	else {
354 		kernel_fpu_begin();
355 		aesni_dec(ctx, dst, src);
356 		kernel_fpu_end();
357 	}
358 }
359 
360 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
361 {
362 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
363 
364 	aesni_enc(ctx, dst, src);
365 }
366 
367 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
368 {
369 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
370 
371 	aesni_dec(ctx, dst, src);
372 }
373 
374 static int ecb_encrypt(struct blkcipher_desc *desc,
375 		       struct scatterlist *dst, struct scatterlist *src,
376 		       unsigned int nbytes)
377 {
378 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
379 	struct blkcipher_walk walk;
380 	int err;
381 
382 	blkcipher_walk_init(&walk, dst, src, nbytes);
383 	err = blkcipher_walk_virt(desc, &walk);
384 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
385 
386 	kernel_fpu_begin();
387 	while ((nbytes = walk.nbytes)) {
388 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
389 			      nbytes & AES_BLOCK_MASK);
390 		nbytes &= AES_BLOCK_SIZE - 1;
391 		err = blkcipher_walk_done(desc, &walk, nbytes);
392 	}
393 	kernel_fpu_end();
394 
395 	return err;
396 }
397 
398 static int ecb_decrypt(struct blkcipher_desc *desc,
399 		       struct scatterlist *dst, struct scatterlist *src,
400 		       unsigned int nbytes)
401 {
402 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
403 	struct blkcipher_walk walk;
404 	int err;
405 
406 	blkcipher_walk_init(&walk, dst, src, nbytes);
407 	err = blkcipher_walk_virt(desc, &walk);
408 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
409 
410 	kernel_fpu_begin();
411 	while ((nbytes = walk.nbytes)) {
412 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
413 			      nbytes & AES_BLOCK_MASK);
414 		nbytes &= AES_BLOCK_SIZE - 1;
415 		err = blkcipher_walk_done(desc, &walk, nbytes);
416 	}
417 	kernel_fpu_end();
418 
419 	return err;
420 }
421 
422 static int cbc_encrypt(struct blkcipher_desc *desc,
423 		       struct scatterlist *dst, struct scatterlist *src,
424 		       unsigned int nbytes)
425 {
426 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
427 	struct blkcipher_walk walk;
428 	int err;
429 
430 	blkcipher_walk_init(&walk, dst, src, nbytes);
431 	err = blkcipher_walk_virt(desc, &walk);
432 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
433 
434 	kernel_fpu_begin();
435 	while ((nbytes = walk.nbytes)) {
436 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437 			      nbytes & AES_BLOCK_MASK, walk.iv);
438 		nbytes &= AES_BLOCK_SIZE - 1;
439 		err = blkcipher_walk_done(desc, &walk, nbytes);
440 	}
441 	kernel_fpu_end();
442 
443 	return err;
444 }
445 
446 static int cbc_decrypt(struct blkcipher_desc *desc,
447 		       struct scatterlist *dst, struct scatterlist *src,
448 		       unsigned int nbytes)
449 {
450 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
451 	struct blkcipher_walk walk;
452 	int err;
453 
454 	blkcipher_walk_init(&walk, dst, src, nbytes);
455 	err = blkcipher_walk_virt(desc, &walk);
456 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
457 
458 	kernel_fpu_begin();
459 	while ((nbytes = walk.nbytes)) {
460 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
461 			      nbytes & AES_BLOCK_MASK, walk.iv);
462 		nbytes &= AES_BLOCK_SIZE - 1;
463 		err = blkcipher_walk_done(desc, &walk, nbytes);
464 	}
465 	kernel_fpu_end();
466 
467 	return err;
468 }
469 
470 #ifdef CONFIG_X86_64
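/*
 * Handle the final partial block of a CTR walk: encrypt the current counter
 * block to produce one block of keystream, XOR just the remaining bytes into
 * the destination, then bump the counter.
 */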
471 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
472 			    struct blkcipher_walk *walk)
473 {
474 	u8 *ctrblk = walk->iv;
475 	u8 keystream[AES_BLOCK_SIZE];
476 	u8 *src = walk->src.virt.addr;
477 	u8 *dst = walk->dst.virt.addr;
478 	unsigned int nbytes = walk->nbytes;
479 
480 	aesni_enc(ctx, keystream, ctrblk);
481 	crypto_xor(keystream, src, nbytes);
482 	memcpy(dst, keystream, nbytes);
483 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
484 }
485 
486 #ifdef CONFIG_AS_AVX
487 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
488 			      const u8 *in, unsigned int len, u8 *iv)
489 {
490 	/*
491 	 * Based on the key length, override with the by8 version of CTR
492 	 * mode encryption/decryption for improved performance.
493 	 * aes_set_key_common() ensures that the key length is one of
494 	 * {128,192,256}.
495 	 */
496 	if (ctx->key_length == AES_KEYSIZE_128)
497 		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
498 	else if (ctx->key_length == AES_KEYSIZE_192)
499 		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
500 	else
501 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
502 }
503 #endif
504 
505 static int ctr_crypt(struct blkcipher_desc *desc,
506 		     struct scatterlist *dst, struct scatterlist *src,
507 		     unsigned int nbytes)
508 {
509 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
510 	struct blkcipher_walk walk;
511 	int err;
512 
513 	blkcipher_walk_init(&walk, dst, src, nbytes);
514 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
515 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
516 
517 	kernel_fpu_begin();
518 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
519 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
520 			              nbytes & AES_BLOCK_MASK, walk.iv);
521 		nbytes &= AES_BLOCK_SIZE - 1;
522 		err = blkcipher_walk_done(desc, &walk, nbytes);
523 	}
524 	if (walk.nbytes) {
525 		ctr_crypt_final(ctx, &walk);
526 		err = blkcipher_walk_done(desc, &walk, 0);
527 	}
528 	kernel_fpu_end();
529 
530 	return err;
531 }
532 #endif
533 
534 static int ablk_ecb_init(struct crypto_tfm *tfm)
535 {
536 	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
537 }
538 
539 static int ablk_cbc_init(struct crypto_tfm *tfm)
540 {
541 	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
542 }
543 
544 #ifdef CONFIG_X86_64
545 static int ablk_ctr_init(struct crypto_tfm *tfm)
546 {
547 	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
548 }
549 
550 #endif
551 
552 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
553 static int ablk_pcbc_init(struct crypto_tfm *tfm)
554 {
555 	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
556 }
557 #endif
558 
559 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
560 {
561 	aesni_ecb_enc(ctx, blks, blks, nbytes);
562 }
563 
564 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
565 {
566 	aesni_ecb_dec(ctx, blks, blks, nbytes);
567 }
568 
569 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
570 			    unsigned int keylen)
571 {
572 	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
573 	int err;
574 
575 	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
576 				 keylen - AES_BLOCK_SIZE);
577 	if (err)
578 		return err;
579 
580 	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
581 }
582 
583 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
584 {
585 	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
586 
587 	lrw_free_table(&ctx->lrw_table);
588 }
589 
590 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
591 		       struct scatterlist *src, unsigned int nbytes)
592 {
593 	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
594 	be128 buf[8];
595 	struct lrw_crypt_req req = {
596 		.tbuf = buf,
597 		.tbuflen = sizeof(buf),
598 
599 		.table_ctx = &ctx->lrw_table,
600 		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
601 		.crypt_fn = lrw_xts_encrypt_callback,
602 	};
603 	int ret;
604 
605 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
606 
607 	kernel_fpu_begin();
608 	ret = lrw_crypt(desc, dst, src, nbytes, &req);
609 	kernel_fpu_end();
610 
611 	return ret;
612 }
613 
614 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
615 		       struct scatterlist *src, unsigned int nbytes)
616 {
617 	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
618 	be128 buf[8];
619 	struct lrw_crypt_req req = {
620 		.tbuf = buf,
621 		.tbuflen = sizeof(buf),
622 
623 		.table_ctx = &ctx->lrw_table,
624 		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
625 		.crypt_fn = lrw_xts_decrypt_callback,
626 	};
627 	int ret;
628 
629 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
630 
631 	kernel_fpu_begin();
632 	ret = lrw_crypt(desc, dst, src, nbytes, &req);
633 	kernel_fpu_end();
634 
635 	return ret;
636 }
637 
638 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
639 			    unsigned int keylen)
640 {
641 	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
642 	u32 *flags = &tfm->crt_flags;
643 	int err;
644 
645 	/* key consists of keys of equal size concatenated, therefore
646 	 * the length must be even
647 	 */
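	/*
	 * For example, a 64-byte key passed to xts(aes) is split into two
	 * AES-256 keys: bytes 0..31 become the data (crypt) key and bytes
	 * 32..63 become the tweak key below.
	 */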
648 	if (keylen % 2) {
649 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
650 		return -EINVAL;
651 	}
652 
653 	/* first half of xts-key is for crypt */
654 	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
655 	if (err)
656 		return err;
657 
658 	/* second half of xts-key is for tweak */
659 	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
660 				  keylen / 2);
661 }
662 
663 
664 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
665 {
666 	aesni_enc(ctx, out, in);
667 }
668 
669 #ifdef CONFIG_X86_64
670 
671 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
672 {
673 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
674 }
675 
676 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
677 {
678 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
679 }
680 
681 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
682 {
683 	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
684 }
685 
686 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
687 {
688 	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
689 }
690 
691 static const struct common_glue_ctx aesni_enc_xts = {
692 	.num_funcs = 2,
693 	.fpu_blocks_limit = 1,
694 
695 	.funcs = { {
696 		.num_blocks = 8,
697 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
698 	}, {
699 		.num_blocks = 1,
700 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
701 	} }
702 };
703 
704 static const struct common_glue_ctx aesni_dec_xts = {
705 	.num_funcs = 2,
706 	.fpu_blocks_limit = 1,
707 
708 	.funcs = { {
709 		.num_blocks = 8,
710 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
711 	}, {
712 		.num_blocks = 1,
713 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
714 	} }
715 };
716 
717 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
718 		       struct scatterlist *src, unsigned int nbytes)
719 {
720 	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
721 
722 	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
723 				     XTS_TWEAK_CAST(aesni_xts_tweak),
724 				     aes_ctx(ctx->raw_tweak_ctx),
725 				     aes_ctx(ctx->raw_crypt_ctx));
726 }
727 
728 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
729 		       struct scatterlist *src, unsigned int nbytes)
730 {
731 	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
732 
733 	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
734 				     XTS_TWEAK_CAST(aesni_xts_tweak),
735 				     aes_ctx(ctx->raw_tweak_ctx),
736 				     aes_ctx(ctx->raw_crypt_ctx));
737 }
738 
739 #else
740 
741 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
742 		       struct scatterlist *src, unsigned int nbytes)
743 {
744 	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
745 	be128 buf[8];
746 	struct xts_crypt_req req = {
747 		.tbuf = buf,
748 		.tbuflen = sizeof(buf),
749 
750 		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
751 		.tweak_fn = aesni_xts_tweak,
752 		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
753 		.crypt_fn = lrw_xts_encrypt_callback,
754 	};
755 	int ret;
756 
757 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
758 
759 	kernel_fpu_begin();
760 	ret = xts_crypt(desc, dst, src, nbytes, &req);
761 	kernel_fpu_end();
762 
763 	return ret;
764 }
765 
766 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
767 		       struct scatterlist *src, unsigned int nbytes)
768 {
769 	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
770 	be128 buf[8];
771 	struct xts_crypt_req req = {
772 		.tbuf = buf,
773 		.tbuflen = sizeof(buf),
774 
775 		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
776 		.tweak_fn = aesni_xts_tweak,
777 		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
778 		.crypt_fn = lrw_xts_decrypt_callback,
779 	};
780 	int ret;
781 
782 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
783 
784 	kernel_fpu_begin();
785 	ret = xts_crypt(desc, dst, src, nbytes, &req);
786 	kernel_fpu_end();
787 
788 	return ret;
789 }
790 
791 #endif
792 
793 #ifdef CONFIG_X86_64
794 static int rfc4106_init(struct crypto_aead *aead)
795 {
796 	struct cryptd_aead *cryptd_tfm;
797 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
798 
799 	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
800 				       CRYPTO_ALG_INTERNAL,
801 				       CRYPTO_ALG_INTERNAL);
802 	if (IS_ERR(cryptd_tfm))
803 		return PTR_ERR(cryptd_tfm);
804 
805 	*ctx = cryptd_tfm;
806 	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
807 	return 0;
808 }
809 
810 static void rfc4106_exit(struct crypto_aead *aead)
811 {
812 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
813 
814 	cryptd_free_aead(*ctx);
815 }
816 
817 static void
818 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
819 {
820 	struct aesni_gcm_set_hash_subkey_result *result = req->data;
821 
822 	if (err == -EINPROGRESS)
823 		return;
824 	result->err = err;
825 	complete(&result->completion);
826 }
827 
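/*
 * Derive the GHASH hash subkey H = AES-K(0^128): CTR-encrypt a zeroed
 * 16-byte buffer with an all-zero IV, so the resulting "ciphertext" is the
 * raw block cipher applied to an all-zero block.
 */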
828 static int
829 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
830 {
831 	struct crypto_ablkcipher *ctr_tfm;
832 	struct ablkcipher_request *req;
833 	int ret = -EINVAL;
834 	struct aesni_hash_subkey_req_data *req_data;
835 
836 	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
837 	if (IS_ERR(ctr_tfm))
838 		return PTR_ERR(ctr_tfm);
839 
840 	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
841 	if (ret)
842 		goto out_free_ablkcipher;
843 
844 	ret = -ENOMEM;
845 	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
846 	if (!req)
847 		goto out_free_ablkcipher;
848 
849 	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
850 	if (!req_data)
851 		goto out_free_request;
852 
853 	memset(req_data->iv, 0, sizeof(req_data->iv));
854 
855 	/* Clear the data in the hash sub key container to zero.
856 	 * We want to cipher all zeros to create the hash sub key. */
857 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
858 
859 	init_completion(&req_data->result.completion);
860 	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
861 	ablkcipher_request_set_tfm(req, ctr_tfm);
862 	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
863 					CRYPTO_TFM_REQ_MAY_BACKLOG,
864 					rfc4106_set_hash_subkey_done,
865 					&req_data->result);
866 
867 	ablkcipher_request_set_crypt(req, &req_data->sg,
868 		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
869 
870 	ret = crypto_ablkcipher_encrypt(req);
871 	if (ret == -EINPROGRESS || ret == -EBUSY) {
872 		ret = wait_for_completion_interruptible
873 			(&req_data->result.completion);
874 		if (!ret)
875 			ret = req_data->result.err;
876 	}
877 	kfree(req_data);
878 out_free_request:
879 	ablkcipher_request_free(req);
880 out_free_ablkcipher:
881 	crypto_free_ablkcipher(ctr_tfm);
882 	return ret;
883 }
884 
885 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
886 				  unsigned int key_len)
887 {
888 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
889 
890 	if (key_len < 4) {
891 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
892 		return -EINVAL;
893 	}
894 	/* Account for the 4 byte nonce at the end of the key. */
895 	key_len -= 4;
896 
897 	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
898 
899 	return aes_set_key_common(crypto_aead_tfm(aead),
900 				  &ctx->aes_key_expanded, key, key_len) ?:
901 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
902 }
903 
904 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
905 			   unsigned int key_len)
906 {
907 	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
908 	struct cryptd_aead *cryptd_tfm = *ctx;
909 
910 	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
911 }
912 
913 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
914 				       unsigned int authsize)
915 {
916 	switch (authsize) {
917 	case 8:
918 	case 12:
919 	case 16:
920 		break;
921 	default:
922 		return -EINVAL;
923 	}
924 
925 	return 0;
926 }
927 
928 /* This is the Integrity Check Value (aka the authentication tag) length and
929  * can be 8, 12 or 16 bytes long. */
930 static int rfc4106_set_authsize(struct crypto_aead *parent,
931 				unsigned int authsize)
932 {
933 	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
934 	struct cryptd_aead *cryptd_tfm = *ctx;
935 
936 	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
937 }
938 
939 static int helper_rfc4106_encrypt(struct aead_request *req)
940 {
941 	u8 one_entry_in_sg = 0;
942 	u8 *src, *dst, *assoc;
943 	__be32 counter = cpu_to_be32(1);
944 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
945 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
946 	void *aes_ctx = &(ctx->aes_key_expanded);
947 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
948 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
949 	struct scatter_walk src_sg_walk;
950 	struct scatter_walk dst_sg_walk;
951 	unsigned int i;
952 
953 	/* Assuming we are supporting rfc4106 64-bit extended sequence
954 	 * numbers, the AAD length must be equal to 16 or 20 bytes.
955 	 */
956 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
957 		return -EINVAL;
958 
959 	/* Build the IV: 4-byte salt (nonce), 8-byte explicit IV, 32-bit counter = 1 */
960 	for (i = 0; i < 4; i++)
961 		*(iv+i) = ctx->nonce[i];
962 	for (i = 0; i < 8; i++)
963 		*(iv+4+i) = req->iv[i];
964 	*((__be32 *)(iv+12)) = counter;
965 
966 	if (sg_is_last(req->src) &&
967 	    req->src->offset + req->src->length <= PAGE_SIZE &&
968 	    sg_is_last(req->dst) && req->dst->length &&
969 	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
970 		one_entry_in_sg = 1;
971 		scatterwalk_start(&src_sg_walk, req->src);
972 		assoc = scatterwalk_map(&src_sg_walk);
973 		src = assoc + req->assoclen;
974 		dst = src;
975 		if (unlikely(req->src != req->dst)) {
976 			scatterwalk_start(&dst_sg_walk, req->dst);
977 			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
978 		}
979 	} else {
980 		/* Allocate memory for src, dst, assoc */
981 		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
982 			GFP_ATOMIC);
983 		if (unlikely(!assoc))
984 			return -ENOMEM;
985 		scatterwalk_map_and_copy(assoc, req->src, 0,
986 					 req->assoclen + req->cryptlen, 0);
987 		src = assoc + req->assoclen;
988 		dst = src;
989 	}
990 
991 	kernel_fpu_begin();
992 	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
993 			  ctx->hash_subkey, assoc, req->assoclen - 8,
994 			  dst + req->cryptlen, auth_tag_len);
995 	kernel_fpu_end();
996 
997 	/* The authTag (aka the Integrity Check Value) needs to be written
998 	 * back to the packet. */
999 	if (one_entry_in_sg) {
1000 		if (unlikely(req->src != req->dst)) {
1001 			scatterwalk_unmap(dst - req->assoclen);
1002 			scatterwalk_advance(&dst_sg_walk, req->dst->length);
1003 			scatterwalk_done(&dst_sg_walk, 1, 0);
1004 		}
1005 		scatterwalk_unmap(assoc);
1006 		scatterwalk_advance(&src_sg_walk, req->src->length);
1007 		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1008 	} else {
1009 		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1010 					 req->cryptlen + auth_tag_len, 1);
1011 		kfree(assoc);
1012 	}
1013 	return 0;
1014 }
1015 
1016 static int helper_rfc4106_decrypt(struct aead_request *req)
1017 {
1018 	u8 one_entry_in_sg = 0;
1019 	u8 *src, *dst, *assoc;
1020 	unsigned long tempCipherLen = 0;
1021 	__be32 counter = cpu_to_be32(1);
1022 	int retval = 0;
1023 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1024 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1025 	void *aes_ctx = &(ctx->aes_key_expanded);
1026 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1027 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1028 	u8 authTag[16];
1029 	struct scatter_walk src_sg_walk;
1030 	struct scatter_walk dst_sg_walk;
1031 	unsigned int i;
1032 
1033 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1034 		return -EINVAL;
1035 
1036 	/* Assuming we are supporting rfc4106 64-bit extended sequence
1037 	 * numbers, the AAD length must be equal to 16 or 20 bytes.
1038 	 */
1039 
1040 	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1041 	/* Build the IV: 4-byte salt (nonce), 8-byte explicit IV, 32-bit counter = 1 */
1042 	for (i = 0; i < 4; i++)
1043 		*(iv+i) = ctx->nonce[i];
1044 	for (i = 0; i < 8; i++)
1045 		*(iv+4+i) = req->iv[i];
1046 	*((__be32 *)(iv+12)) = counter;
1047 
1048 	if (sg_is_last(req->src) &&
1049 	    req->src->offset + req->src->length <= PAGE_SIZE &&
1050 	    sg_is_last(req->dst) &&
1051 	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
1052 		one_entry_in_sg = 1;
1053 		scatterwalk_start(&src_sg_walk, req->src);
1054 		assoc = scatterwalk_map(&src_sg_walk);
1055 		src = assoc + req->assoclen;
1056 		dst = src;
1057 		if (unlikely(req->src != req->dst)) {
1058 			scatterwalk_start(&dst_sg_walk, req->dst);
1059 			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
1060 		}
1061 
1062 	} else {
1063 		/* Allocate memory for src, dst, assoc */
1064 		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1065 		if (!assoc)
1066 			return -ENOMEM;
1067 		scatterwalk_map_and_copy(assoc, req->src, 0,
1068 					 req->assoclen + req->cryptlen, 0);
1069 		src = assoc + req->assoclen;
1070 		dst = src;
1071 	}
1072 
1073 	kernel_fpu_begin();
1074 	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1075 			  ctx->hash_subkey, assoc, req->assoclen - 8,
1076 			  authTag, auth_tag_len);
1077 	kernel_fpu_end();
1078 
1079 	/* Compare generated tag with passed in tag. */
1080 	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1081 		-EBADMSG : 0;
1082 
1083 	if (one_entry_in_sg) {
1084 		if (unlikely(req->src != req->dst)) {
1085 			scatterwalk_unmap(dst - req->assoclen);
1086 			scatterwalk_advance(&dst_sg_walk, req->dst->length);
1087 			scatterwalk_done(&dst_sg_walk, 1, 0);
1088 		}
1089 		scatterwalk_unmap(assoc);
1090 		scatterwalk_advance(&src_sg_walk, req->src->length);
1091 		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1092 	} else {
1093 		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1094 					 tempCipherLen, 1);
1095 		kfree(assoc);
1096 	}
1097 	return retval;
1098 }
1099 
1100 static int rfc4106_encrypt(struct aead_request *req)
1101 {
1102 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1103 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1104 	struct cryptd_aead *cryptd_tfm = *ctx;
1105 
1106 	aead_request_set_tfm(req, irq_fpu_usable() ?
1107 				  cryptd_aead_child(cryptd_tfm) :
1108 				  &cryptd_tfm->base);
1109 
1110 	return crypto_aead_encrypt(req);
1111 }
1112 
1113 static int rfc4106_decrypt(struct aead_request *req)
1114 {
1115 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1116 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1117 	struct cryptd_aead *cryptd_tfm = *ctx;
1118 
1119 	aead_request_set_tfm(req, irq_fpu_usable() ?
1120 				  cryptd_aead_child(cryptd_tfm) :
1121 				  &cryptd_tfm->base);
1122 
1123 	return crypto_aead_decrypt(req);
1124 }
1125 #endif
1126 
1127 static struct crypto_alg aesni_algs[] = { {
1128 	.cra_name		= "aes",
1129 	.cra_driver_name	= "aes-aesni",
1130 	.cra_priority		= 300,
1131 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1132 	.cra_blocksize		= AES_BLOCK_SIZE,
1133 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1134 				  AESNI_ALIGN - 1,
1135 	.cra_alignmask		= 0,
1136 	.cra_module		= THIS_MODULE,
1137 	.cra_u	= {
1138 		.cipher	= {
1139 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1140 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1141 			.cia_setkey		= aes_set_key,
1142 			.cia_encrypt		= aes_encrypt,
1143 			.cia_decrypt		= aes_decrypt
1144 		}
1145 	}
1146 }, {
1147 	.cra_name		= "__aes-aesni",
1148 	.cra_driver_name	= "__driver-aes-aesni",
1149 	.cra_priority		= 0,
1150 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1151 	.cra_blocksize		= AES_BLOCK_SIZE,
1152 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1153 				  AESNI_ALIGN - 1,
1154 	.cra_alignmask		= 0,
1155 	.cra_module		= THIS_MODULE,
1156 	.cra_u	= {
1157 		.cipher	= {
1158 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1159 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1160 			.cia_setkey		= aes_set_key,
1161 			.cia_encrypt		= __aes_encrypt,
1162 			.cia_decrypt		= __aes_decrypt
1163 		}
1164 	}
1165 }, {
1166 	.cra_name		= "__ecb-aes-aesni",
1167 	.cra_driver_name	= "__driver-ecb-aes-aesni",
1168 	.cra_priority		= 0,
1169 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1170 				  CRYPTO_ALG_INTERNAL,
1171 	.cra_blocksize		= AES_BLOCK_SIZE,
1172 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1173 				  AESNI_ALIGN - 1,
1174 	.cra_alignmask		= 0,
1175 	.cra_type		= &crypto_blkcipher_type,
1176 	.cra_module		= THIS_MODULE,
1177 	.cra_u = {
1178 		.blkcipher = {
1179 			.min_keysize	= AES_MIN_KEY_SIZE,
1180 			.max_keysize	= AES_MAX_KEY_SIZE,
1181 			.setkey		= aes_set_key,
1182 			.encrypt	= ecb_encrypt,
1183 			.decrypt	= ecb_decrypt,
1184 		},
1185 	},
1186 }, {
1187 	.cra_name		= "__cbc-aes-aesni",
1188 	.cra_driver_name	= "__driver-cbc-aes-aesni",
1189 	.cra_priority		= 0,
1190 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1191 				  CRYPTO_ALG_INTERNAL,
1192 	.cra_blocksize		= AES_BLOCK_SIZE,
1193 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1194 				  AESNI_ALIGN - 1,
1195 	.cra_alignmask		= 0,
1196 	.cra_type		= &crypto_blkcipher_type,
1197 	.cra_module		= THIS_MODULE,
1198 	.cra_u = {
1199 		.blkcipher = {
1200 			.min_keysize	= AES_MIN_KEY_SIZE,
1201 			.max_keysize	= AES_MAX_KEY_SIZE,
1202 			.setkey		= aes_set_key,
1203 			.encrypt	= cbc_encrypt,
1204 			.decrypt	= cbc_decrypt,
1205 		},
1206 	},
1207 }, {
1208 	.cra_name		= "ecb(aes)",
1209 	.cra_driver_name	= "ecb-aes-aesni",
1210 	.cra_priority		= 400,
1211 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1212 	.cra_blocksize		= AES_BLOCK_SIZE,
1213 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1214 	.cra_alignmask		= 0,
1215 	.cra_type		= &crypto_ablkcipher_type,
1216 	.cra_module		= THIS_MODULE,
1217 	.cra_init		= ablk_ecb_init,
1218 	.cra_exit		= ablk_exit,
1219 	.cra_u = {
1220 		.ablkcipher = {
1221 			.min_keysize	= AES_MIN_KEY_SIZE,
1222 			.max_keysize	= AES_MAX_KEY_SIZE,
1223 			.setkey		= ablk_set_key,
1224 			.encrypt	= ablk_encrypt,
1225 			.decrypt	= ablk_decrypt,
1226 		},
1227 	},
1228 }, {
1229 	.cra_name		= "cbc(aes)",
1230 	.cra_driver_name	= "cbc-aes-aesni",
1231 	.cra_priority		= 400,
1232 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1233 	.cra_blocksize		= AES_BLOCK_SIZE,
1234 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1235 	.cra_alignmask		= 0,
1236 	.cra_type		= &crypto_ablkcipher_type,
1237 	.cra_module		= THIS_MODULE,
1238 	.cra_init		= ablk_cbc_init,
1239 	.cra_exit		= ablk_exit,
1240 	.cra_u = {
1241 		.ablkcipher = {
1242 			.min_keysize	= AES_MIN_KEY_SIZE,
1243 			.max_keysize	= AES_MAX_KEY_SIZE,
1244 			.ivsize		= AES_BLOCK_SIZE,
1245 			.setkey		= ablk_set_key,
1246 			.encrypt	= ablk_encrypt,
1247 			.decrypt	= ablk_decrypt,
1248 		},
1249 	},
1250 #ifdef CONFIG_X86_64
1251 }, {
1252 	.cra_name		= "__ctr-aes-aesni",
1253 	.cra_driver_name	= "__driver-ctr-aes-aesni",
1254 	.cra_priority		= 0,
1255 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1256 				  CRYPTO_ALG_INTERNAL,
1257 	.cra_blocksize		= 1,
1258 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
1259 				  AESNI_ALIGN - 1,
1260 	.cra_alignmask		= 0,
1261 	.cra_type		= &crypto_blkcipher_type,
1262 	.cra_module		= THIS_MODULE,
1263 	.cra_u = {
1264 		.blkcipher = {
1265 			.min_keysize	= AES_MIN_KEY_SIZE,
1266 			.max_keysize	= AES_MAX_KEY_SIZE,
1267 			.ivsize		= AES_BLOCK_SIZE,
1268 			.setkey		= aes_set_key,
1269 			.encrypt	= ctr_crypt,
1270 			.decrypt	= ctr_crypt,
1271 		},
1272 	},
1273 }, {
1274 	.cra_name		= "ctr(aes)",
1275 	.cra_driver_name	= "ctr-aes-aesni",
1276 	.cra_priority		= 400,
1277 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1278 	.cra_blocksize		= 1,
1279 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1280 	.cra_alignmask		= 0,
1281 	.cra_type		= &crypto_ablkcipher_type,
1282 	.cra_module		= THIS_MODULE,
1283 	.cra_init		= ablk_ctr_init,
1284 	.cra_exit		= ablk_exit,
1285 	.cra_u = {
1286 		.ablkcipher = {
1287 			.min_keysize	= AES_MIN_KEY_SIZE,
1288 			.max_keysize	= AES_MAX_KEY_SIZE,
1289 			.ivsize		= AES_BLOCK_SIZE,
1290 			.setkey		= ablk_set_key,
1291 			.encrypt	= ablk_encrypt,
1292 			.decrypt	= ablk_encrypt,
1293 			.geniv		= "chainiv",
1294 		},
1295 	},
1296 #endif
1297 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1298 }, {
1299 	.cra_name		= "pcbc(aes)",
1300 	.cra_driver_name	= "pcbc-aes-aesni",
1301 	.cra_priority		= 400,
1302 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1303 	.cra_blocksize		= AES_BLOCK_SIZE,
1304 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1305 	.cra_alignmask		= 0,
1306 	.cra_type		= &crypto_ablkcipher_type,
1307 	.cra_module		= THIS_MODULE,
1308 	.cra_init		= ablk_pcbc_init,
1309 	.cra_exit		= ablk_exit,
1310 	.cra_u = {
1311 		.ablkcipher = {
1312 			.min_keysize	= AES_MIN_KEY_SIZE,
1313 			.max_keysize	= AES_MAX_KEY_SIZE,
1314 			.ivsize		= AES_BLOCK_SIZE,
1315 			.setkey		= ablk_set_key,
1316 			.encrypt	= ablk_encrypt,
1317 			.decrypt	= ablk_decrypt,
1318 		},
1319 	},
1320 #endif
1321 }, {
1322 	.cra_name		= "__lrw-aes-aesni",
1323 	.cra_driver_name	= "__driver-lrw-aes-aesni",
1324 	.cra_priority		= 0,
1325 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1326 				  CRYPTO_ALG_INTERNAL,
1327 	.cra_blocksize		= AES_BLOCK_SIZE,
1328 	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
1329 	.cra_alignmask		= 0,
1330 	.cra_type		= &crypto_blkcipher_type,
1331 	.cra_module		= THIS_MODULE,
1332 	.cra_exit		= lrw_aesni_exit_tfm,
1333 	.cra_u = {
1334 		.blkcipher = {
1335 			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1336 			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1337 			.ivsize		= AES_BLOCK_SIZE,
1338 			.setkey		= lrw_aesni_setkey,
1339 			.encrypt	= lrw_encrypt,
1340 			.decrypt	= lrw_decrypt,
1341 		},
1342 	},
1343 }, {
1344 	.cra_name		= "__xts-aes-aesni",
1345 	.cra_driver_name	= "__driver-xts-aes-aesni",
1346 	.cra_priority		= 0,
1347 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1348 				  CRYPTO_ALG_INTERNAL,
1349 	.cra_blocksize		= AES_BLOCK_SIZE,
1350 	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
1351 	.cra_alignmask		= 0,
1352 	.cra_type		= &crypto_blkcipher_type,
1353 	.cra_module		= THIS_MODULE,
1354 	.cra_u = {
1355 		.blkcipher = {
1356 			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1357 			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1358 			.ivsize		= AES_BLOCK_SIZE,
1359 			.setkey		= xts_aesni_setkey,
1360 			.encrypt	= xts_encrypt,
1361 			.decrypt	= xts_decrypt,
1362 		},
1363 	},
1364 }, {
1365 	.cra_name		= "lrw(aes)",
1366 	.cra_driver_name	= "lrw-aes-aesni",
1367 	.cra_priority		= 400,
1368 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1369 	.cra_blocksize		= AES_BLOCK_SIZE,
1370 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1371 	.cra_alignmask		= 0,
1372 	.cra_type		= &crypto_ablkcipher_type,
1373 	.cra_module		= THIS_MODULE,
1374 	.cra_init		= ablk_init,
1375 	.cra_exit		= ablk_exit,
1376 	.cra_u = {
1377 		.ablkcipher = {
1378 			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1379 			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1380 			.ivsize		= AES_BLOCK_SIZE,
1381 			.setkey		= ablk_set_key,
1382 			.encrypt	= ablk_encrypt,
1383 			.decrypt	= ablk_decrypt,
1384 		},
1385 	},
1386 }, {
1387 	.cra_name		= "xts(aes)",
1388 	.cra_driver_name	= "xts-aes-aesni",
1389 	.cra_priority		= 400,
1390 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1391 	.cra_blocksize		= AES_BLOCK_SIZE,
1392 	.cra_ctxsize		= sizeof(struct async_helper_ctx),
1393 	.cra_alignmask		= 0,
1394 	.cra_type		= &crypto_ablkcipher_type,
1395 	.cra_module		= THIS_MODULE,
1396 	.cra_init		= ablk_init,
1397 	.cra_exit		= ablk_exit,
1398 	.cra_u = {
1399 		.ablkcipher = {
1400 			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1401 			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1402 			.ivsize		= AES_BLOCK_SIZE,
1403 			.setkey		= ablk_set_key,
1404 			.encrypt	= ablk_encrypt,
1405 			.decrypt	= ablk_decrypt,
1406 		},
1407 	},
1408 } };
1409 
1410 #ifdef CONFIG_X86_64
1411 static struct aead_alg aesni_aead_algs[] = { {
1412 	.setkey			= common_rfc4106_set_key,
1413 	.setauthsize		= common_rfc4106_set_authsize,
1414 	.encrypt		= helper_rfc4106_encrypt,
1415 	.decrypt		= helper_rfc4106_decrypt,
1416 	.ivsize			= 8,
1417 	.maxauthsize		= 16,
1418 	.base = {
1419 		.cra_name		= "__gcm-aes-aesni",
1420 		.cra_driver_name	= "__driver-gcm-aes-aesni",
1421 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1422 		.cra_blocksize		= 1,
1423 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1424 		.cra_alignmask		= AESNI_ALIGN - 1,
1425 		.cra_module		= THIS_MODULE,
1426 	},
1427 }, {
1428 	.init			= rfc4106_init,
1429 	.exit			= rfc4106_exit,
1430 	.setkey			= rfc4106_set_key,
1431 	.setauthsize		= rfc4106_set_authsize,
1432 	.encrypt		= rfc4106_encrypt,
1433 	.decrypt		= rfc4106_decrypt,
1434 	.ivsize			= 8,
1435 	.maxauthsize		= 16,
1436 	.base = {
1437 		.cra_name		= "rfc4106(gcm(aes))",
1438 		.cra_driver_name	= "rfc4106-gcm-aesni",
1439 		.cra_priority		= 400,
1440 		.cra_flags		= CRYPTO_ALG_ASYNC,
1441 		.cra_blocksize		= 1,
1442 		.cra_ctxsize		= sizeof(struct cryptd_aead *),
1443 		.cra_module		= THIS_MODULE,
1444 	},
1445 } };
1446 #else
1447 static struct aead_alg aesni_aead_algs[0];
1448 #endif
1449 
1450 
1451 static const struct x86_cpu_id aesni_cpu_id[] = {
1452 	X86_FEATURE_MATCH(X86_FEATURE_AES),
1453 	{}
1454 };
1455 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1456 
1457 static int __init aesni_init(void)
1458 {
1459 	int err;
1460 
1461 	if (!x86_match_cpu(aesni_cpu_id))
1462 		return -ENODEV;
1463 #ifdef CONFIG_X86_64
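	/*
	 * Pick the fastest GCM implementation the CPU and toolchain support:
	 * AVX2 (gen4) if available, else AVX (gen2), else the plain
	 * AES-NI/SSE version.
	 */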
1464 #ifdef CONFIG_AS_AVX2
1465 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1466 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1467 		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1468 		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1469 	} else
1470 #endif
1471 #ifdef CONFIG_AS_AVX
1472 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1473 		pr_info("AVX version of gcm_enc/dec engaged.\n");
1474 		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1475 		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1476 	} else
1477 #endif
1478 	{
1479 		pr_info("SSE version of gcm_enc/dec engaged.\n");
1480 		aesni_gcm_enc_tfm = aesni_gcm_enc;
1481 		aesni_gcm_dec_tfm = aesni_gcm_dec;
1482 	}
1483 	aesni_ctr_enc_tfm = aesni_ctr_enc;
1484 #ifdef CONFIG_AS_AVX
1485 	if (cpu_has_avx) {
1486 		/* optimize performance of ctr mode encryption transform */
1487 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1488 		pr_info("AES CTR mode by8 optimization enabled\n");
1489 	}
1490 #endif
1491 #endif
1492 
1493 	err = crypto_fpu_init();
1494 	if (err)
1495 		return err;
1496 
1497 	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1498 	if (err)
1499 		goto fpu_exit;
1500 
1501 	err = crypto_register_aeads(aesni_aead_algs,
1502 				    ARRAY_SIZE(aesni_aead_algs));
1503 	if (err)
1504 		goto unregister_algs;
1505 
1506 	return err;
1507 
1508 unregister_algs:
1509 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1510 fpu_exit:
1511 	crypto_fpu_exit();
1512 	return err;
1513 }
1514 
1515 static void __exit aesni_exit(void)
1516 {
1517 	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1518 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1519 
1520 	crypto_fpu_exit();
1521 }
1522 
1523 late_initcall(aesni_init);
1524 module_exit(aesni_exit);
1525 
1526 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1527 MODULE_LICENSE("GPL");
1528 MODULE_ALIAS_CRYPTO("aes");
1529