1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for Intel AES-NI instructions. This file contains glue
4  * code, the real AES implementation is in aesni-intel_asm.S.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  */
17 
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/workqueue.h>
35 #include <linux/spinlock.h>
36 #ifdef CONFIG_X86_64
37 #include <asm/crypto/glue_helper.h>
38 #endif
39 
40 
41 #define AESNI_ALIGN	16
42 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
43 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
44 #define RFC4106_HASH_SUBKEY_SIZE 16
45 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
46 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
47 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
48 
49 /* This data is stored at the end of the crypto_tfm struct.
50  * It is a form of per-"session" data storage and
51  * needs to be 16 byte aligned.
52  */
53 struct aesni_rfc4106_gcm_ctx {
54 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
55 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
56 	u8 nonce[4];
57 };
58 
59 struct generic_gcmaes_ctx {
60 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
61 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
62 };
63 
64 struct aesni_xts_ctx {
65 	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
66 	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
67 };
68 
69 #define GCM_BLOCK_LEN 16
70 
71 struct gcm_context_data {
72 	/* init, update and finalize context data */
73 	u8 aad_hash[GCM_BLOCK_LEN];
74 	u64 aad_length;
75 	u64 in_length;
76 	u8 partial_block_enc_key[GCM_BLOCK_LEN];
77 	u8 orig_IV[GCM_BLOCK_LEN];
78 	u8 current_counter[GCM_BLOCK_LEN];
79 	u64 partial_block_len;
80 	u64 unused;
81 	u8 hash_keys[GCM_BLOCK_LEN * 16];
82 };
83 
84 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
85 			     unsigned int key_len);
86 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
87 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
88 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
89 			      const u8 *in, unsigned int len);
90 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
91 			      const u8 *in, unsigned int len);
92 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
93 			      const u8 *in, unsigned int len, u8 *iv);
94 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
95 			      const u8 *in, unsigned int len, u8 *iv);
96 
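/*
 * Payload-size thresholds (in bytes) used by gcmaes_crypt_by_sg() below:
 * requests smaller than these fall back from the avx_gen4/avx_gen2 GCM
 * routines to the next narrower implementation.
 */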
97 #define AVX_GEN2_OPTSIZE 640
98 #define AVX_GEN4_OPTSIZE 4096
99 
100 asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
101 				  const u8 *in, unsigned int len, u8 *iv);
102 
103 asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
104 				  const u8 *in, unsigned int len, u8 *iv);
105 
106 #ifdef CONFIG_X86_64
107 
108 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
109 			      const u8 *in, unsigned int len, u8 *iv);
110 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
111 			      const u8 *in, unsigned int len, u8 *iv);
112 
113 /* asmlinkage void aesni_gcm_enc()
114  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
115  * struct gcm_context_data *gdata, context data. May be uninitialized.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
120  *         16-byte aligned pointer.
121  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
122  * const u8 *aad, Additional Authentication Data (AAD)
123  * unsigned long aad_len, Length of AAD in bytes.
124  * u8 *auth_tag, Authenticated Tag output.
125  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
126  *          Valid values are 16 (most likely), 12 or 8.
127  */
128 asmlinkage void aesni_gcm_enc(void *ctx,
129 			struct gcm_context_data *gdata, u8 *out,
130 			const u8 *in, unsigned long plaintext_len, u8 *iv,
131 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
132 			u8 *auth_tag, unsigned long auth_tag_len);
133 
134 /* asmlinkage void aesni_gcm_dec()
135  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
136  * struct gcm_context_data *gdata, context data. May be uninitialized.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
141  *         16-byte aligned pointer.
142  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
143  * const u8 *aad, Additional Authentication Data (AAD)
144  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
145  * to be 8 or 12 bytes
146  * u8 *auth_tag, Authenticated Tag output.
147  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
148  * Valid values are 16 (most likely), 12 or 8.
149  */
150 asmlinkage void aesni_gcm_dec(void *ctx,
151 			struct gcm_context_data *gdata, u8 *out,
152 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
153 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154 			u8 *auth_tag, unsigned long auth_tag_len);
155 
156 /* Scatter / Gather routines, with args similar to above */
157 asmlinkage void aesni_gcm_init(void *ctx,
158 			       struct gcm_context_data *gdata,
159 			       u8 *iv,
160 			       u8 *hash_subkey, const u8 *aad,
161 			       unsigned long aad_len);
162 asmlinkage void aesni_gcm_enc_update(void *ctx,
163 				     struct gcm_context_data *gdata, u8 *out,
164 				     const u8 *in, unsigned long plaintext_len);
165 asmlinkage void aesni_gcm_dec_update(void *ctx,
166 				     struct gcm_context_data *gdata, u8 *out,
167 				     const u8 *in,
168 				     unsigned long ciphertext_len);
169 asmlinkage void aesni_gcm_finalize(void *ctx,
170 				   struct gcm_context_data *gdata,
171 				   u8 *auth_tag, unsigned long auth_tag_len);
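/*
 * A minimal usage sketch of the init/update/finalize entry points above,
 * for a single contiguous buffer (the real, scatterlist-aware user is
 * gcmaes_crypt_by_sg() below). The helper name is illustrative only; it
 * assumes the caller already holds the FPU via kernel_fpu_begin() and
 * that iv points to the 16-byte pre-counter block j0 described above.
 */
static inline void aesni_gcm_enc_contig_sketch(void *ctx,
					       struct gcm_context_data *gdata,
					       u8 *out, const u8 *in,
					       unsigned long len, u8 *iv,
					       u8 *hash_subkey, const u8 *aad,
					       unsigned long aad_len,
					       u8 *auth_tag,
					       unsigned long auth_tag_len)
{
	aesni_gcm_init(ctx, gdata, iv, hash_subkey, aad, aad_len);
	aesni_gcm_enc_update(ctx, gdata, out, in, len);
	aesni_gcm_finalize(ctx, gdata, auth_tag, auth_tag_len);
}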
172 
173 static const struct aesni_gcm_tfm_s {
174 	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
175 		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
176 	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
177 			   const u8 *in, unsigned long plaintext_len);
178 	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
179 			   const u8 *in, unsigned long ciphertext_len);
180 	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
181 			 u8 *auth_tag, unsigned long auth_tag_len);
182 } *aesni_gcm_tfm;
183 
184 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
185 	.init = &aesni_gcm_init,
186 	.enc_update = &aesni_gcm_enc_update,
187 	.dec_update = &aesni_gcm_dec_update,
188 	.finalize = &aesni_gcm_finalize,
189 };
190 
191 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
192 		void *keys, u8 *out, unsigned int num_bytes);
193 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
194 		void *keys, u8 *out, unsigned int num_bytes);
195 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
196 		void *keys, u8 *out, unsigned int num_bytes);
197 /*
198  * asmlinkage void aesni_gcm_init_avx_gen2()
199  * gcm_data *my_ctx_data, context data
200  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
201  */
202 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
203 					struct gcm_context_data *gdata,
204 					u8 *iv,
205 					u8 *hash_subkey,
206 					const u8 *aad,
207 					unsigned long aad_len);
208 
209 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
210 				     struct gcm_context_data *gdata, u8 *out,
211 				     const u8 *in, unsigned long plaintext_len);
212 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
213 				     struct gcm_context_data *gdata, u8 *out,
214 				     const u8 *in,
215 				     unsigned long ciphertext_len);
216 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
217 				   struct gcm_context_data *gdata,
218 				   u8 *auth_tag, unsigned long auth_tag_len);
219 
220 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
221 				struct gcm_context_data *gdata, u8 *out,
222 			const u8 *in, unsigned long plaintext_len, u8 *iv,
223 			const u8 *aad, unsigned long aad_len,
224 			u8 *auth_tag, unsigned long auth_tag_len);
225 
226 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
227 				struct gcm_context_data *gdata, u8 *out,
228 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
229 			const u8 *aad, unsigned long aad_len,
230 			u8 *auth_tag, unsigned long auth_tag_len);
231 
232 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
233 	.init = &aesni_gcm_init_avx_gen2,
234 	.enc_update = &aesni_gcm_enc_update_avx_gen2,
235 	.dec_update = &aesni_gcm_dec_update_avx_gen2,
236 	.finalize = &aesni_gcm_finalize_avx_gen2,
237 };
238 
239 /*
240  * asmlinkage void aesni_gcm_init_avx_gen4()
241  * gcm_data *my_ctx_data, context data
242  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
243  */
244 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
245 					struct gcm_context_data *gdata,
246 					u8 *iv,
247 					u8 *hash_subkey,
248 					const u8 *aad,
249 					unsigned long aad_len);
250 
251 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
252 				     struct gcm_context_data *gdata, u8 *out,
253 				     const u8 *in, unsigned long plaintext_len);
254 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
255 				     struct gcm_context_data *gdata, u8 *out,
256 				     const u8 *in,
257 				     unsigned long ciphertext_len);
258 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
259 				   struct gcm_context_data *gdata,
260 				   u8 *auth_tag, unsigned long auth_tag_len);
261 
262 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
263 				struct gcm_context_data *gdata, u8 *out,
264 			const u8 *in, unsigned long plaintext_len, u8 *iv,
265 			const u8 *aad, unsigned long aad_len,
266 			u8 *auth_tag, unsigned long auth_tag_len);
267 
268 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
269 				struct gcm_context_data *gdata, u8 *out,
270 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
271 			const u8 *aad, unsigned long aad_len,
272 			u8 *auth_tag, unsigned long auth_tag_len);
273 
274 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
275 	.init = &aesni_gcm_init_avx_gen4,
276 	.enc_update = &aesni_gcm_enc_update_avx_gen4,
277 	.dec_update = &aesni_gcm_dec_update_avx_gen4,
278 	.finalize = &aesni_gcm_finalize_avx_gen4,
279 };
280 
281 static inline struct
282 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
283 {
284 	unsigned long align = AESNI_ALIGN;
285 
286 	if (align <= crypto_tfm_ctx_alignment())
287 		align = 1;
288 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
289 }
290 
291 static inline struct
292 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
293 {
294 	unsigned long align = AESNI_ALIGN;
295 
296 	if (align <= crypto_tfm_ctx_alignment())
297 		align = 1;
298 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
299 }
300 #endif
301 
302 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
303 {
304 	unsigned long addr = (unsigned long)raw_ctx;
305 	unsigned long align = AESNI_ALIGN;
306 
307 	if (align <= crypto_tfm_ctx_alignment())
308 		align = 1;
309 	return (struct crypto_aes_ctx *)ALIGN(addr, align);
310 }
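/*
 * Worked example of the alignment handling above: with CRYPTO_MINALIGN of 8
 * (typical on x86) and AESNI_ALIGN of 16, AESNI_ALIGN_EXTRA is 8, so
 * CRYPTO_AES_CTX_SIZE and XTS_AES_CTX_SIZE reserve 8 spare bytes and the
 * ALIGN()/PTR_ALIGN() calls can always round the raw context pointer up to
 * the next 16-byte boundary without overrunning the allocation.
 */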
311 
312 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
313 			      const u8 *in_key, unsigned int key_len)
314 {
315 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
316 	int err;
317 
318 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
319 	    key_len != AES_KEYSIZE_256)
320 		return -EINVAL;
321 
322 	if (!crypto_simd_usable())
323 		err = aes_expandkey(ctx, in_key, key_len);
324 	else {
325 		kernel_fpu_begin();
326 		err = aesni_set_key(ctx, in_key, key_len);
327 		kernel_fpu_end();
328 	}
329 
330 	return err;
331 }
332 
333 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
334 		       unsigned int key_len)
335 {
336 	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
337 }
338 
339 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
340 {
341 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
342 
343 	if (!crypto_simd_usable()) {
344 		aes_encrypt(ctx, dst, src);
345 	} else {
346 		kernel_fpu_begin();
347 		aesni_enc(ctx, dst, src);
348 		kernel_fpu_end();
349 	}
350 }
351 
352 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
353 {
354 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
355 
356 	if (!crypto_simd_usable()) {
357 		aes_decrypt(ctx, dst, src);
358 	} else {
359 		kernel_fpu_begin();
360 		aesni_dec(ctx, dst, src);
361 		kernel_fpu_end();
362 	}
363 }
364 
365 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
366 			         unsigned int len)
367 {
368 	return aes_set_key_common(crypto_skcipher_tfm(tfm),
369 				  crypto_skcipher_ctx(tfm), key, len);
370 }
371 
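/*
 * The ECB/CBC handlers below share one pattern: walk the request with
 * skcipher_walk_virt(), process all whole AES blocks through the assembler
 * routines while the FPU is held, and return any sub-block remainder to
 * skcipher_walk_done().
 */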
372 static int ecb_encrypt(struct skcipher_request *req)
373 {
374 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
375 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
376 	struct skcipher_walk walk;
377 	unsigned int nbytes;
378 	int err;
379 
380 	err = skcipher_walk_virt(&walk, req, true);
381 
382 	kernel_fpu_begin();
383 	while ((nbytes = walk.nbytes)) {
384 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
385 			      nbytes & AES_BLOCK_MASK);
386 		nbytes &= AES_BLOCK_SIZE - 1;
387 		err = skcipher_walk_done(&walk, nbytes);
388 	}
389 	kernel_fpu_end();
390 
391 	return err;
392 }
393 
394 static int ecb_decrypt(struct skcipher_request *req)
395 {
396 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
397 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
398 	struct skcipher_walk walk;
399 	unsigned int nbytes;
400 	int err;
401 
402 	err = skcipher_walk_virt(&walk, req, true);
403 
404 	kernel_fpu_begin();
405 	while ((nbytes = walk.nbytes)) {
406 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
407 			      nbytes & AES_BLOCK_MASK);
408 		nbytes &= AES_BLOCK_SIZE - 1;
409 		err = skcipher_walk_done(&walk, nbytes);
410 	}
411 	kernel_fpu_end();
412 
413 	return err;
414 }
415 
416 static int cbc_encrypt(struct skcipher_request *req)
417 {
418 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
419 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
420 	struct skcipher_walk walk;
421 	unsigned int nbytes;
422 	int err;
423 
424 	err = skcipher_walk_virt(&walk, req, true);
425 
426 	kernel_fpu_begin();
427 	while ((nbytes = walk.nbytes)) {
428 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
429 			      nbytes & AES_BLOCK_MASK, walk.iv);
430 		nbytes &= AES_BLOCK_SIZE - 1;
431 		err = skcipher_walk_done(&walk, nbytes);
432 	}
433 	kernel_fpu_end();
434 
435 	return err;
436 }
437 
438 static int cbc_decrypt(struct skcipher_request *req)
439 {
440 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
441 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
442 	struct skcipher_walk walk;
443 	unsigned int nbytes;
444 	int err;
445 
446 	err = skcipher_walk_virt(&walk, req, true);
447 
448 	kernel_fpu_begin();
449 	while ((nbytes = walk.nbytes)) {
450 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
451 			      nbytes & AES_BLOCK_MASK, walk.iv);
452 		nbytes &= AES_BLOCK_SIZE - 1;
453 		err = skcipher_walk_done(&walk, nbytes);
454 	}
455 	kernel_fpu_end();
456 
457 	return err;
458 }
459 
460 #ifdef CONFIG_X86_64
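/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to produce one block of keystream and XOR only the
 * remaining nbytes of it into the destination.
 */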
461 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
462 			    struct skcipher_walk *walk)
463 {
464 	u8 *ctrblk = walk->iv;
465 	u8 keystream[AES_BLOCK_SIZE];
466 	u8 *src = walk->src.virt.addr;
467 	u8 *dst = walk->dst.virt.addr;
468 	unsigned int nbytes = walk->nbytes;
469 
470 	aesni_enc(ctx, keystream, ctrblk);
471 	crypto_xor_cpy(dst, keystream, src, nbytes);
472 
473 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
474 }
475 
476 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
477 			      const u8 *in, unsigned int len, u8 *iv)
478 {
479 	/*
480 	 * Based on the key length, override with the by8 version
481 	 * of ctr mode encryption/decryption for improved performance.
482 	 * aes_set_key_common() ensures that the key length is one of
483 	 * {128,192,256}.
484 	 */
485 	if (ctx->key_length == AES_KEYSIZE_128)
486 		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
487 	else if (ctx->key_length == AES_KEYSIZE_192)
488 		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
489 	else
490 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
491 }
492 
493 static int ctr_crypt(struct skcipher_request *req)
494 {
495 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
496 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
497 	struct skcipher_walk walk;
498 	unsigned int nbytes;
499 	int err;
500 
501 	err = skcipher_walk_virt(&walk, req, true);
502 
503 	kernel_fpu_begin();
504 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
505 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
506 			              nbytes & AES_BLOCK_MASK, walk.iv);
507 		nbytes &= AES_BLOCK_SIZE - 1;
508 		err = skcipher_walk_done(&walk, nbytes);
509 	}
510 	if (walk.nbytes) {
511 		ctr_crypt_final(ctx, &walk);
512 		err = skcipher_walk_done(&walk, 0);
513 	}
514 	kernel_fpu_end();
515 
516 	return err;
517 }
518 
519 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
520 			    unsigned int keylen)
521 {
522 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
523 	int err;
524 
525 	err = xts_verify_key(tfm, key, keylen);
526 	if (err)
527 		return err;
528 
529 	keylen /= 2;
530 
531 	/* first half of xts-key is for crypt */
532 	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
533 				 key, keylen);
534 	if (err)
535 		return err;
536 
537 	/* second half of xts-key is for tweak */
538 	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
539 				  key + keylen, keylen);
540 }
541 
542 
543 static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
544 {
545 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
546 }
547 
548 static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
549 {
550 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
551 }
552 
553 static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
554 {
555 	aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
556 }
557 
558 static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
559 {
560 	aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
561 }
562 
563 static const struct common_glue_ctx aesni_enc_xts = {
564 	.num_funcs = 2,
565 	.fpu_blocks_limit = 1,
566 
567 	.funcs = { {
568 		.num_blocks = 32,
569 		.fn_u = { .xts = aesni_xts_enc32 }
570 	}, {
571 		.num_blocks = 1,
572 		.fn_u = { .xts = aesni_xts_enc }
573 	} }
574 };
575 
576 static const struct common_glue_ctx aesni_dec_xts = {
577 	.num_funcs = 2,
578 	.fpu_blocks_limit = 1,
579 
580 	.funcs = { {
581 		.num_blocks = 32,
582 		.fn_u = { .xts = aesni_xts_dec32 }
583 	}, {
584 		.num_blocks = 1,
585 		.fn_u = { .xts = aesni_xts_dec }
586 	} }
587 };
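/*
 * glue_xts_req_128bit() consumes these tables front to back: it uses the
 * 32-block-wide assembler routines for as much of the request as possible
 * and falls back to the single-block functions for the tail.
 */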
588 
589 static int xts_encrypt(struct skcipher_request *req)
590 {
591 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
592 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
593 
594 	return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
595 				   aes_ctx(ctx->raw_tweak_ctx),
596 				   aes_ctx(ctx->raw_crypt_ctx),
597 				   false);
598 }
599 
600 static int xts_decrypt(struct skcipher_request *req)
601 {
602 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
603 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
604 
605 	return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
606 				   aes_ctx(ctx->raw_tweak_ctx),
607 				   aes_ctx(ctx->raw_crypt_ctx),
608 				   true);
609 }
610 
611 static int
612 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
613 {
614 	struct crypto_aes_ctx ctx;
615 	int ret;
616 
617 	ret = aes_expandkey(&ctx, key, key_len);
618 	if (ret)
619 		return ret;
620 
621 	/* Clear the data in the hash sub key container to zero.*/
622 	/* We want to cipher all zeros to create the hash sub key. */
623 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
624 
625 	aes_encrypt(&ctx, hash_subkey, hash_subkey);
626 
627 	memzero_explicit(&ctx, sizeof(ctx));
628 	return 0;
629 }
630 
631 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
632 				  unsigned int key_len)
633 {
634 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
635 
636 	if (key_len < 4)
637 		return -EINVAL;
638 
639 	/* Account for 4 byte nonce at the end. */
640 	key_len -= 4;
641 
642 	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
643 
644 	return aes_set_key_common(crypto_aead_tfm(aead),
645 				  &ctx->aes_key_expanded, key, key_len) ?:
646 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
647 }
648 
649 /* This is the Integrity Check Value (aka the authentication tag) length and can
650  * be 8, 12 or 16 bytes long. */
651 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
652 				       unsigned int authsize)
653 {
654 	switch (authsize) {
655 	case 8:
656 	case 12:
657 	case 16:
658 		break;
659 	default:
660 		return -EINVAL;
661 	}
662 
663 	return 0;
664 }
665 
666 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
667 				       unsigned int authsize)
668 {
669 	switch (authsize) {
670 	case 4:
671 	case 8:
672 	case 12:
673 	case 13:
674 	case 14:
675 	case 15:
676 	case 16:
677 		break;
678 	default:
679 		return -EINVAL;
680 	}
681 
682 	return 0;
683 }
684 
685 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
686 			      unsigned int assoclen, u8 *hash_subkey,
687 			      u8 *iv, void *aes_ctx)
688 {
689 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
690 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
691 	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
692 	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
693 	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
694 	struct scatter_walk dst_sg_walk = {};
695 	unsigned long left = req->cryptlen;
696 	unsigned long len, srclen, dstlen;
697 	struct scatter_walk assoc_sg_walk;
698 	struct scatter_walk src_sg_walk;
699 	struct scatterlist src_start[2];
700 	struct scatterlist dst_start[2];
701 	struct scatterlist *src_sg;
702 	struct scatterlist *dst_sg;
703 	u8 *src, *dst, *assoc;
704 	u8 *assocmem = NULL;
705 	u8 authTag[16];
706 
707 	if (!enc)
708 		left -= auth_tag_len;
709 
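	/*
	 * Drop to a narrower GCM implementation when the payload is below
	 * the corresponding optimization threshold.
	 */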
710 	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
711 		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
712 	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
713 		gcm_tfm = &aesni_gcm_tfm_sse;
714 
715 	/* Linearize assoc, if not already linear */
716 	if (req->src->length >= assoclen && req->src->length &&
717 		(!PageHighMem(sg_page(req->src)) ||
718 			req->src->offset + req->src->length <= PAGE_SIZE)) {
719 		scatterwalk_start(&assoc_sg_walk, req->src);
720 		assoc = scatterwalk_map(&assoc_sg_walk);
721 	} else {
722 		/* assoc can be any length, so must be on heap */
723 		assocmem = kmalloc(assoclen, GFP_ATOMIC);
724 		if (unlikely(!assocmem))
725 			return -ENOMEM;
726 		assoc = assocmem;
727 
728 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
729 	}
730 
731 	if (left) {
732 		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
733 		scatterwalk_start(&src_sg_walk, src_sg);
734 		if (req->src != req->dst) {
735 			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
736 						  req->assoclen);
737 			scatterwalk_start(&dst_sg_walk, dst_sg);
738 		}
739 	}
740 
741 	kernel_fpu_begin();
742 	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
743 	if (req->src != req->dst) {
744 		while (left) {
745 			src = scatterwalk_map(&src_sg_walk);
746 			dst = scatterwalk_map(&dst_sg_walk);
747 			srclen = scatterwalk_clamp(&src_sg_walk, left);
748 			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
749 			len = min(srclen, dstlen);
750 			if (len) {
751 				if (enc)
752 					gcm_tfm->enc_update(aes_ctx, data,
753 							     dst, src, len);
754 				else
755 					gcm_tfm->dec_update(aes_ctx, data,
756 							     dst, src, len);
757 			}
758 			left -= len;
759 
760 			scatterwalk_unmap(src);
761 			scatterwalk_unmap(dst);
762 			scatterwalk_advance(&src_sg_walk, len);
763 			scatterwalk_advance(&dst_sg_walk, len);
764 			scatterwalk_done(&src_sg_walk, 0, left);
765 			scatterwalk_done(&dst_sg_walk, 1, left);
766 		}
767 	} else {
768 		while (left) {
769 			dst = src = scatterwalk_map(&src_sg_walk);
770 			len = scatterwalk_clamp(&src_sg_walk, left);
771 			if (len) {
772 				if (enc)
773 					gcm_tfm->enc_update(aes_ctx, data,
774 							     src, src, len);
775 				else
776 					gcm_tfm->dec_update(aes_ctx, data,
777 							     src, src, len);
778 			}
779 			left -= len;
780 			scatterwalk_unmap(src);
781 			scatterwalk_advance(&src_sg_walk, len);
782 			scatterwalk_done(&src_sg_walk, 1, left);
783 		}
784 	}
785 	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
786 	kernel_fpu_end();
787 
788 	if (!assocmem)
789 		scatterwalk_unmap(assoc);
790 	else
791 		kfree(assocmem);
792 
793 	if (!enc) {
794 		u8 authTagMsg[16];
795 
796 		/* Copy out original authTag */
797 		scatterwalk_map_and_copy(authTagMsg, req->src,
798 					 req->assoclen + req->cryptlen -
799 					 auth_tag_len,
800 					 auth_tag_len, 0);
801 
802 		/* Compare generated tag with passed in tag. */
803 		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
804 			-EBADMSG : 0;
805 	}
806 
807 	/* Copy in the authTag */
808 	scatterwalk_map_and_copy(authTag, req->dst,
809 				 req->assoclen + req->cryptlen,
810 				 auth_tag_len, 1);
811 
812 	return 0;
813 }
814 
815 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
816 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
817 {
818 	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
819 				aes_ctx);
820 }
821 
822 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
823 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
824 {
825 	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
826 				aes_ctx);
827 }
828 
829 static int helper_rfc4106_encrypt(struct aead_request *req)
830 {
831 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
832 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
833 	void *aes_ctx = &(ctx->aes_key_expanded);
834 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
835 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
836 	unsigned int i;
837 	__be32 counter = cpu_to_be32(1);
838 
839 	/* Assuming we are supporting rfc4106 64-bit extended */
840 	/* sequence numbers, the AAD length must be equal */
841 	/* to 16 or 20 bytes. */
842 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
843 		return -EINVAL;
844 
845 	/* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter = 1 */
846 	for (i = 0; i < 4; i++)
847 		*(iv+i) = ctx->nonce[i];
848 	for (i = 0; i < 8; i++)
849 		*(iv+4+i) = req->iv[i];
850 	*((__be32 *)(iv+12)) = counter;
851 
852 	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
853 			      aes_ctx);
854 }
855 
856 static int helper_rfc4106_decrypt(struct aead_request *req)
857 {
858 	__be32 counter = cpu_to_be32(1);
859 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
860 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
861 	void *aes_ctx = &(ctx->aes_key_expanded);
862 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
863 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
864 	unsigned int i;
865 
866 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
867 		return -EINVAL;
868 
869 	/* Assuming we are supporting rfc4106 64-bit extended */
870 	/* sequence numbers, the AAD length must be */
871 	/* equal to 16 or 20 bytes. */
872 
873 	/* Build the IV: 4-byte nonce, 8-byte explicit IV, 32-bit counter = 1 */
874 	for (i = 0; i < 4; i++)
875 		*(iv+i) = ctx->nonce[i];
876 	for (i = 0; i < 8; i++)
877 		*(iv+4+i) = req->iv[i];
878 	*((__be32 *)(iv+12)) = counter;
879 
880 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
881 			      aes_ctx);
882 }
883 #endif
884 
885 static struct crypto_alg aesni_cipher_alg = {
886 	.cra_name		= "aes",
887 	.cra_driver_name	= "aes-aesni",
888 	.cra_priority		= 300,
889 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
890 	.cra_blocksize		= AES_BLOCK_SIZE,
891 	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
892 	.cra_module		= THIS_MODULE,
893 	.cra_u	= {
894 		.cipher	= {
895 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
896 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
897 			.cia_setkey		= aes_set_key,
898 			.cia_encrypt		= aesni_encrypt,
899 			.cia_decrypt		= aesni_decrypt
900 		}
901 	}
902 };
903 
904 static struct skcipher_alg aesni_skciphers[] = {
905 	{
906 		.base = {
907 			.cra_name		= "__ecb(aes)",
908 			.cra_driver_name	= "__ecb-aes-aesni",
909 			.cra_priority		= 400,
910 			.cra_flags		= CRYPTO_ALG_INTERNAL,
911 			.cra_blocksize		= AES_BLOCK_SIZE,
912 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
913 			.cra_module		= THIS_MODULE,
914 		},
915 		.min_keysize	= AES_MIN_KEY_SIZE,
916 		.max_keysize	= AES_MAX_KEY_SIZE,
917 		.setkey		= aesni_skcipher_setkey,
918 		.encrypt	= ecb_encrypt,
919 		.decrypt	= ecb_decrypt,
920 	}, {
921 		.base = {
922 			.cra_name		= "__cbc(aes)",
923 			.cra_driver_name	= "__cbc-aes-aesni",
924 			.cra_priority		= 400,
925 			.cra_flags		= CRYPTO_ALG_INTERNAL,
926 			.cra_blocksize		= AES_BLOCK_SIZE,
927 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
928 			.cra_module		= THIS_MODULE,
929 		},
930 		.min_keysize	= AES_MIN_KEY_SIZE,
931 		.max_keysize	= AES_MAX_KEY_SIZE,
932 		.ivsize		= AES_BLOCK_SIZE,
933 		.setkey		= aesni_skcipher_setkey,
934 		.encrypt	= cbc_encrypt,
935 		.decrypt	= cbc_decrypt,
936 #ifdef CONFIG_X86_64
937 	}, {
938 		.base = {
939 			.cra_name		= "__ctr(aes)",
940 			.cra_driver_name	= "__ctr-aes-aesni",
941 			.cra_priority		= 400,
942 			.cra_flags		= CRYPTO_ALG_INTERNAL,
943 			.cra_blocksize		= 1,
944 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
945 			.cra_module		= THIS_MODULE,
946 		},
947 		.min_keysize	= AES_MIN_KEY_SIZE,
948 		.max_keysize	= AES_MAX_KEY_SIZE,
949 		.ivsize		= AES_BLOCK_SIZE,
950 		.chunksize	= AES_BLOCK_SIZE,
951 		.setkey		= aesni_skcipher_setkey,
952 		.encrypt	= ctr_crypt,
953 		.decrypt	= ctr_crypt,
954 	}, {
955 		.base = {
956 			.cra_name		= "__xts(aes)",
957 			.cra_driver_name	= "__xts-aes-aesni",
958 			.cra_priority		= 401,
959 			.cra_flags		= CRYPTO_ALG_INTERNAL,
960 			.cra_blocksize		= AES_BLOCK_SIZE,
961 			.cra_ctxsize		= XTS_AES_CTX_SIZE,
962 			.cra_module		= THIS_MODULE,
963 		},
964 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
965 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
966 		.ivsize		= AES_BLOCK_SIZE,
967 		.setkey		= xts_aesni_setkey,
968 		.encrypt	= xts_encrypt,
969 		.decrypt	= xts_decrypt,
970 #endif
971 	}
972 };
973 
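/*
 * Handles for the simd wrappers created around the internal "__" algorithms:
 * the simd_register_*_compat() calls in aesni_init() wrap each internal
 * algorithm in a helper that defers to cryptd when the FPU cannot be used
 * and register the wrappers under the names without the "__" prefix.
 */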
974 static
975 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
976 
977 #ifdef CONFIG_X86_64
978 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
979 				  unsigned int key_len)
980 {
981 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
982 
983 	return aes_set_key_common(crypto_aead_tfm(aead),
984 				  &ctx->aes_key_expanded, key, key_len) ?:
985 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
986 }
987 
988 static int generic_gcmaes_encrypt(struct aead_request *req)
989 {
990 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
991 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
992 	void *aes_ctx = &(ctx->aes_key_expanded);
993 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
994 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
995 	__be32 counter = cpu_to_be32(1);
996 
997 	memcpy(iv, req->iv, 12);
998 	*((__be32 *)(iv+12)) = counter;
999 
1000 	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1001 			      aes_ctx);
1002 }
1003 
1004 static int generic_gcmaes_decrypt(struct aead_request *req)
1005 {
1006 	__be32 counter = cpu_to_be32(1);
1007 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1008 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1009 	void *aes_ctx = &(ctx->aes_key_expanded);
1010 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1011 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1012 
1013 	memcpy(iv, req->iv, 12);
1014 	*((__be32 *)(iv+12)) = counter;
1015 
1016 	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1017 			      aes_ctx);
1018 }
1019 
1020 static struct aead_alg aesni_aeads[] = { {
1021 	.setkey			= common_rfc4106_set_key,
1022 	.setauthsize		= common_rfc4106_set_authsize,
1023 	.encrypt		= helper_rfc4106_encrypt,
1024 	.decrypt		= helper_rfc4106_decrypt,
1025 	.ivsize			= GCM_RFC4106_IV_SIZE,
1026 	.maxauthsize		= 16,
1027 	.base = {
1028 		.cra_name		= "__rfc4106(gcm(aes))",
1029 		.cra_driver_name	= "__rfc4106-gcm-aesni",
1030 		.cra_priority		= 400,
1031 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1032 		.cra_blocksize		= 1,
1033 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1034 		.cra_alignmask		= AESNI_ALIGN - 1,
1035 		.cra_module		= THIS_MODULE,
1036 	},
1037 }, {
1038 	.setkey			= generic_gcmaes_set_key,
1039 	.setauthsize		= generic_gcmaes_set_authsize,
1040 	.encrypt		= generic_gcmaes_encrypt,
1041 	.decrypt		= generic_gcmaes_decrypt,
1042 	.ivsize			= GCM_AES_IV_SIZE,
1043 	.maxauthsize		= 16,
1044 	.base = {
1045 		.cra_name		= "__gcm(aes)",
1046 		.cra_driver_name	= "__generic-gcm-aesni",
1047 		.cra_priority		= 400,
1048 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1049 		.cra_blocksize		= 1,
1050 		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1051 		.cra_alignmask		= AESNI_ALIGN - 1,
1052 		.cra_module		= THIS_MODULE,
1053 	},
1054 } };
1055 #else
1056 static struct aead_alg aesni_aeads[0];
1057 #endif
1058 
1059 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1060 
1061 static const struct x86_cpu_id aesni_cpu_id[] = {
1062 	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1063 	{}
1064 };
1065 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1066 
1067 static int __init aesni_init(void)
1068 {
1069 	int err;
1070 
1071 	if (!x86_match_cpu(aesni_cpu_id))
1072 		return -ENODEV;
1073 #ifdef CONFIG_X86_64
1074 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1075 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1076 		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
1077 	} else
1078 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1079 		pr_info("AVX version of gcm_enc/dec engaged.\n");
1080 		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
1081 	} else {
1082 		pr_info("SSE version of gcm_enc/dec engaged.\n");
1083 		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
1084 	}
1085 	aesni_ctr_enc_tfm = aesni_ctr_enc;
1086 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1087 		/* optimize performance of ctr mode encryption transform */
1088 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1089 		pr_info("AES CTR mode by8 optimization enabled\n");
1090 	}
1091 #endif
1092 
1093 	err = crypto_register_alg(&aesni_cipher_alg);
1094 	if (err)
1095 		return err;
1096 
1097 	err = simd_register_skciphers_compat(aesni_skciphers,
1098 					     ARRAY_SIZE(aesni_skciphers),
1099 					     aesni_simd_skciphers);
1100 	if (err)
1101 		goto unregister_cipher;
1102 
1103 	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1104 					 aesni_simd_aeads);
1105 	if (err)
1106 		goto unregister_skciphers;
1107 
1108 	return 0;
1109 
1110 unregister_skciphers:
1111 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1112 				  aesni_simd_skciphers);
1113 unregister_cipher:
1114 	crypto_unregister_alg(&aesni_cipher_alg);
1115 	return err;
1116 }
1117 
1118 static void __exit aesni_exit(void)
1119 {
1120 	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1121 			      aesni_simd_aeads);
1122 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1123 				  aesni_simd_skciphers);
1124 	crypto_unregister_alg(&aesni_cipher_alg);
1125 }
1126 
1127 late_initcall(aesni_init);
1128 module_exit(aesni_exit);
1129 
1130 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1131 MODULE_LICENSE("GPL");
1132 MODULE_ALIAS_CRYPTO("aes");
1133