1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in intel-aes_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21 
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/module.h>
25 #include <linux/err.h>
26 #include <crypto/algapi.h>
27 #include <crypto/aes.h>
28 #include <crypto/cryptd.h>
29 #include <crypto/ctr.h>
30 #include <crypto/b128ops.h>
31 #include <crypto/gcm.h>
32 #include <crypto/xts.h>
33 #include <asm/cpu_device_id.h>
34 #include <asm/fpu/api.h>
35 #include <asm/crypto/aes.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/internal/simd.h>
39 #include <crypto/internal/skcipher.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45 
46 
47 #define AESNI_ALIGN	16
48 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
49 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
50 #define RFC4106_HASH_SUBKEY_SIZE 16
51 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
52 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
53 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
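/*
 * Worked example of the padding above (illustrative; assumes the common x86
 * case of CRYPTO_MINALIGN == 8): AESNI_ALIGN_EXTRA = (16 - 1) & ~7 = 8, so
 * CRYPTO_AES_CTX_SIZE reserves sizeof(struct crypto_aes_ctx) plus 8 spare
 * bytes, which is enough for aes_ctx() further down to round a minimally
 * aligned context pointer up to a 16 byte boundary.
 */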
54 
55 /* This data is stored at the end of the crypto_tfm struct.
56  * It is a per-"session" data storage location.
57  * It needs to be 16 byte aligned.
58  */
59 struct aesni_rfc4106_gcm_ctx {
60 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
61 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
62 	u8 nonce[4];
63 };
64 
65 struct generic_gcmaes_ctx {
66 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
67 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
68 };
69 
70 struct aesni_xts_ctx {
71 	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
72 	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
73 };
74 
75 #define GCM_BLOCK_LEN 16
76 
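/*
 * Scratch state carried across the aesni_gcm_init()/_update()/_finalize()
 * calls declared below.  The assembly side accesses these fields by fixed
 * offset, so their order and sizes are expected to stay in sync with the
 * GCM assembly routines.
 */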
77 struct gcm_context_data {
78 	/* init, update and finalize context data */
79 	u8 aad_hash[GCM_BLOCK_LEN];
80 	u64 aad_length;
81 	u64 in_length;
82 	u8 partial_block_enc_key[GCM_BLOCK_LEN];
83 	u8 orig_IV[GCM_BLOCK_LEN];
84 	u8 current_counter[GCM_BLOCK_LEN];
85 	u64 partial_block_len;
86 	u64 unused;
87 	u8 hash_keys[GCM_BLOCK_LEN * 8];
88 };
89 
90 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
91 			     unsigned int key_len);
92 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
93 			  const u8 *in);
94 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
95 			  const u8 *in);
96 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
97 			      const u8 *in, unsigned int len);
98 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
99 			      const u8 *in, unsigned int len);
100 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
101 			      const u8 *in, unsigned int len, u8 *iv);
102 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
103 			      const u8 *in, unsigned int len, u8 *iv);
104 
105 int crypto_fpu_init(void);
106 void crypto_fpu_exit(void);
107 
108 #define AVX_GEN2_OPTSIZE 640
109 #define AVX_GEN4_OPTSIZE 4096
110 
111 #ifdef CONFIG_X86_64
112 
113 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
114 			      const u8 *in, unsigned int len, u8 *iv);
115 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
116 			      const u8 *in, unsigned int len, u8 *iv);
117 
118 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
119 				 const u8 *in, bool enc, u8 *iv);
120 
121 /* asmlinkage void aesni_gcm_enc()
122  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
123  * struct gcm_context_data.  May be uninitialized.
124  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
125  * const u8 *in, Plaintext input
126  * unsigned long plaintext_len, Length of data in bytes for encryption.
127  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
128  *         16-byte aligned pointer.
129  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
130  * const u8 *aad, Additional Authentication Data (AAD)
131  * unsigned long aad_len, Length of AAD in bytes.
132  * u8 *auth_tag, Authenticated Tag output.
133  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
134  *          Valid values are 16 (most likely), 12 or 8.
135  */
136 asmlinkage void aesni_gcm_enc(void *ctx,
137 			struct gcm_context_data *gdata, u8 *out,
138 			const u8 *in, unsigned long plaintext_len, u8 *iv,
139 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
140 			u8 *auth_tag, unsigned long auth_tag_len);
141 
142 /* asmlinkage void aesni_gcm_dec()
143  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
144  * struct gcm_context_data.  May be uninitialized.
145  * u8 *out, Plaintext output. Decrypt in-place is allowed.
146  * const u8 *in, Ciphertext input
147  * unsigned long ciphertext_len, Length of data in bytes for decryption.
148  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
149  *         16-byte aligned pointer.
150  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
151  * const u8 *aad, Additional Authentication Data (AAD)
152  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
153  * to be 8 or 12 bytes
154  * u8 *auth_tag, Authenticated Tag output.
155  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
156  * Valid values are 16 (most likely), 12 or 8.
157  */
158 asmlinkage void aesni_gcm_dec(void *ctx,
159 			struct gcm_context_data *gdata, u8 *out,
160 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
161 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
162 			u8 *auth_tag, unsigned long auth_tag_len);
163 
164 /* Scatter / Gather routines, with args similar to above */
165 asmlinkage void aesni_gcm_init(void *ctx,
166 			       struct gcm_context_data *gdata,
167 			       u8 *iv,
168 			       u8 *hash_subkey, const u8 *aad,
169 			       unsigned long aad_len);
170 asmlinkage void aesni_gcm_enc_update(void *ctx,
171 				     struct gcm_context_data *gdata, u8 *out,
172 				     const u8 *in, unsigned long plaintext_len);
173 asmlinkage void aesni_gcm_dec_update(void *ctx,
174 				     struct gcm_context_data *gdata, u8 *out,
175 				     const u8 *in,
176 				     unsigned long ciphertext_len);
177 asmlinkage void aesni_gcm_finalize(void *ctx,
178 				   struct gcm_context_data *gdata,
179 				   u8 *auth_tag, unsigned long auth_tag_len);
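/*
 * Sketch of how the scatter/gather entry points above are combined (this is
 * what gcmaes_crypt_by_sg() below does, with the scatterwalk plumbing
 * omitted):
 *
 *	kernel_fpu_begin();
 *	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen);
 *	while (data left)
 *		aesni_gcm_enc_update(aes_ctx, &data, dst, src, len);
 *	aesni_gcm_finalize(aes_ctx, &data, auth_tag, auth_tag_len);
 *	kernel_fpu_end();
 */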
180 
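/*
 * The AVX/AVX2 GCM routines below only handle 128 bit keys and only pay off
 * above a certain request size, so the wrappers fall back to the SSE
 * aesni_gcm_enc()/aesni_gcm_dec() for short requests or other key sizes,
 * and the AVX2 wrappers additionally use the gen2 code for requests below
 * AVX_GEN4_OPTSIZE.
 */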
181 #ifdef CONFIG_AS_AVX
182 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
183 		void *keys, u8 *out, unsigned int num_bytes);
184 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
185 		void *keys, u8 *out, unsigned int num_bytes);
186 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
187 		void *keys, u8 *out, unsigned int num_bytes);
188 /*
189  * asmlinkage void aesni_gcm_precomp_avx_gen2()
190  * gcm_data *my_ctx_data, context data
191  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
192  */
193 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
194 
195 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
196 			const u8 *in, unsigned long plaintext_len, u8 *iv,
197 			const u8 *aad, unsigned long aad_len,
198 			u8 *auth_tag, unsigned long auth_tag_len);
199 
200 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
201 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
202 			const u8 *aad, unsigned long aad_len,
203 			u8 *auth_tag, unsigned long auth_tag_len);
204 
205 static void aesni_gcm_enc_avx(void *ctx,
206 			struct gcm_context_data *data, u8 *out,
207 			const u8 *in, unsigned long plaintext_len, u8 *iv,
208 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
209 			u8 *auth_tag, unsigned long auth_tag_len)
210 {
211 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
212 	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
213 		aesni_gcm_enc(ctx, data, out, in,
214 			plaintext_len, iv, hash_subkey, aad,
215 			aad_len, auth_tag, auth_tag_len);
216 	} else {
217 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
218 		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
219 					aad_len, auth_tag, auth_tag_len);
220 	}
221 }
222 
223 static void aesni_gcm_dec_avx(void *ctx,
224 			struct gcm_context_data *data, u8 *out,
225 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
226 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
227 			u8 *auth_tag, unsigned long auth_tag_len)
228 {
229 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
230 	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
231 		aesni_gcm_dec(ctx, data, out, in,
232 			ciphertext_len, iv, hash_subkey, aad,
233 			aad_len, auth_tag, auth_tag_len);
234 	} else {
235 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
236 		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
237 					aad_len, auth_tag, auth_tag_len);
238 	}
239 }
240 #endif
241 
242 #ifdef CONFIG_AS_AVX2
243 /*
244  * asmlinkage void aesni_gcm_precomp_avx_gen4()
245  * gcm_data *my_ctx_data, context data
246  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
247  */
248 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
249 
250 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
251 			const u8 *in, unsigned long plaintext_len, u8 *iv,
252 			const u8 *aad, unsigned long aad_len,
253 			u8 *auth_tag, unsigned long auth_tag_len);
254 
255 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
256 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
257 			const u8 *aad, unsigned long aad_len,
258 			u8 *auth_tag, unsigned long auth_tag_len);
259 
260 static void aesni_gcm_enc_avx2(void *ctx,
261 			struct gcm_context_data *data, u8 *out,
262 			const u8 *in, unsigned long plaintext_len, u8 *iv,
263 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
264 			u8 *auth_tag, unsigned long auth_tag_len)
265 {
266 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
267 	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
268 		aesni_gcm_enc(ctx, data, out, in,
269 			      plaintext_len, iv, hash_subkey, aad,
270 			      aad_len, auth_tag, auth_tag_len);
271 	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
272 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
273 		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
274 					aad_len, auth_tag, auth_tag_len);
275 	} else {
276 		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
277 		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
278 					aad_len, auth_tag, auth_tag_len);
279 	}
280 }
281 
282 static void aesni_gcm_dec_avx2(void *ctx,
283 	struct gcm_context_data *data, u8 *out,
284 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
285 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
286 			u8 *auth_tag, unsigned long auth_tag_len)
287 {
288 	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
289 	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
290 		aesni_gcm_dec(ctx, data, out, in,
291 			      ciphertext_len, iv, hash_subkey,
292 			      aad, aad_len, auth_tag, auth_tag_len);
293 	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
294 		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
295 		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
296 					aad_len, auth_tag, auth_tag_len);
297 	} else {
298 		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
299 		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
300 					aad_len, auth_tag, auth_tag_len);
301 	}
302 }
303 #endif
304 
305 static void (*aesni_gcm_enc_tfm)(void *ctx,
306 				 struct gcm_context_data *data, u8 *out,
307 				 const u8 *in, unsigned long plaintext_len,
308 				 u8 *iv, u8 *hash_subkey, const u8 *aad,
309 				 unsigned long aad_len, u8 *auth_tag,
310 				 unsigned long auth_tag_len);
311 
312 static void (*aesni_gcm_dec_tfm)(void *ctx,
313 				 struct gcm_context_data *data, u8 *out,
314 				 const u8 *in, unsigned long ciphertext_len,
315 				 u8 *iv, u8 *hash_subkey, const u8 *aad,
316 				 unsigned long aad_len, u8 *auth_tag,
317 				 unsigned long auth_tag_len);
318 
319 static inline struct
320 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
321 {
322 	unsigned long align = AESNI_ALIGN;
323 
324 	if (align <= crypto_tfm_ctx_alignment())
325 		align = 1;
326 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
327 }
328 
329 static inline struct
330 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
331 {
332 	unsigned long align = AESNI_ALIGN;
333 
334 	if (align <= crypto_tfm_ctx_alignment())
335 		align = 1;
336 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
337 }
338 #endif
339 
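/*
 * crypto_tfm contexts are only guaranteed crypto_tfm_ctx_alignment(), which
 * may be smaller than AESNI_ALIGN.  The *_CTX_SIZE macros above reserve
 * AESNI_ALIGN_EXTRA spare bytes so this helper can round the raw context
 * pointer up to the 16 byte boundary the AES-NI assembly expects.
 */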
340 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
341 {
342 	unsigned long addr = (unsigned long)raw_ctx;
343 	unsigned long align = AESNI_ALIGN;
344 
345 	if (align <= crypto_tfm_ctx_alignment())
346 		align = 1;
347 	return (struct crypto_aes_ctx *)ALIGN(addr, align);
348 }
349 
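/*
 * Common pattern for the helpers below: when the FPU is usable in the
 * current context, the AES-NI assembly runs inside a
 * kernel_fpu_begin()/kernel_fpu_end() section; otherwise the code falls
 * back to the generic x86 AES routines (crypto_aes_expand_key(),
 * crypto_aes_encrypt_x86(), ...).
 */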
350 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
351 			      const u8 *in_key, unsigned int key_len)
352 {
353 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
354 	u32 *flags = &tfm->crt_flags;
355 	int err;
356 
357 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
358 	    key_len != AES_KEYSIZE_256) {
359 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
360 		return -EINVAL;
361 	}
362 
363 	if (!irq_fpu_usable())
364 		err = crypto_aes_expand_key(ctx, in_key, key_len);
365 	else {
366 		kernel_fpu_begin();
367 		err = aesni_set_key(ctx, in_key, key_len);
368 		kernel_fpu_end();
369 	}
370 
371 	return err;
372 }
373 
374 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
375 		       unsigned int key_len)
376 {
377 	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
378 }
379 
380 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
381 {
382 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
383 
384 	if (!irq_fpu_usable())
385 		crypto_aes_encrypt_x86(ctx, dst, src);
386 	else {
387 		kernel_fpu_begin();
388 		aesni_enc(ctx, dst, src);
389 		kernel_fpu_end();
390 	}
391 }
392 
393 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
394 {
395 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
396 
397 	if (!irq_fpu_usable())
398 		crypto_aes_decrypt_x86(ctx, dst, src);
399 	else {
400 		kernel_fpu_begin();
401 		aesni_dec(ctx, dst, src);
402 		kernel_fpu_end();
403 	}
404 }
405 
406 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
407 {
408 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
409 
410 	aesni_enc(ctx, dst, src);
411 }
412 
413 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
414 {
415 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
416 
417 	aesni_dec(ctx, dst, src);
418 }
419 
420 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
421 			         unsigned int len)
422 {
423 	return aes_set_key_common(crypto_skcipher_tfm(tfm),
424 				  crypto_skcipher_ctx(tfm), key, len);
425 }
426 
427 static int ecb_encrypt(struct skcipher_request *req)
428 {
429 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
430 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
431 	struct skcipher_walk walk;
432 	unsigned int nbytes;
433 	int err;
434 
435 	err = skcipher_walk_virt(&walk, req, true);
436 
437 	kernel_fpu_begin();
438 	while ((nbytes = walk.nbytes)) {
439 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
440 			      nbytes & AES_BLOCK_MASK);
441 		nbytes &= AES_BLOCK_SIZE - 1;
442 		err = skcipher_walk_done(&walk, nbytes);
443 	}
444 	kernel_fpu_end();
445 
446 	return err;
447 }
448 
449 static int ecb_decrypt(struct skcipher_request *req)
450 {
451 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
452 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
453 	struct skcipher_walk walk;
454 	unsigned int nbytes;
455 	int err;
456 
457 	err = skcipher_walk_virt(&walk, req, true);
458 
459 	kernel_fpu_begin();
460 	while ((nbytes = walk.nbytes)) {
461 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
462 			      nbytes & AES_BLOCK_MASK);
463 		nbytes &= AES_BLOCK_SIZE - 1;
464 		err = skcipher_walk_done(&walk, nbytes);
465 	}
466 	kernel_fpu_end();
467 
468 	return err;
469 }
470 
471 static int cbc_encrypt(struct skcipher_request *req)
472 {
473 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
474 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
475 	struct skcipher_walk walk;
476 	unsigned int nbytes;
477 	int err;
478 
479 	err = skcipher_walk_virt(&walk, req, true);
480 
481 	kernel_fpu_begin();
482 	while ((nbytes = walk.nbytes)) {
483 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
484 			      nbytes & AES_BLOCK_MASK, walk.iv);
485 		nbytes &= AES_BLOCK_SIZE - 1;
486 		err = skcipher_walk_done(&walk, nbytes);
487 	}
488 	kernel_fpu_end();
489 
490 	return err;
491 }
492 
493 static int cbc_decrypt(struct skcipher_request *req)
494 {
495 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
496 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
497 	struct skcipher_walk walk;
498 	unsigned int nbytes;
499 	int err;
500 
501 	err = skcipher_walk_virt(&walk, req, true);
502 
503 	kernel_fpu_begin();
504 	while ((nbytes = walk.nbytes)) {
505 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
506 			      nbytes & AES_BLOCK_MASK, walk.iv);
507 		nbytes &= AES_BLOCK_SIZE - 1;
508 		err = skcipher_walk_done(&walk, nbytes);
509 	}
510 	kernel_fpu_end();
511 
512 	return err;
513 }
514 
515 #ifdef CONFIG_X86_64
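/*
 * CTR mode is a stream cipher, so requests need not be a multiple of
 * AES_BLOCK_SIZE.  The final partial block is handled by encrypting the
 * counter block once and XORing only the remaining bytes into the output.
 */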
516 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
517 			    struct skcipher_walk *walk)
518 {
519 	u8 *ctrblk = walk->iv;
520 	u8 keystream[AES_BLOCK_SIZE];
521 	u8 *src = walk->src.virt.addr;
522 	u8 *dst = walk->dst.virt.addr;
523 	unsigned int nbytes = walk->nbytes;
524 
525 	aesni_enc(ctx, keystream, ctrblk);
526 	crypto_xor_cpy(dst, keystream, src, nbytes);
527 
528 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
529 }
530 
531 #ifdef CONFIG_AS_AVX
532 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
533 			      const u8 *in, unsigned int len, u8 *iv)
534 {
535 	/*
536 	 * Based on the key length, use the by8 version of CTR mode
537 	 * encryption/decryption for improved performance.
538 	 * aes_set_key_common() ensures that the key length is one of
539 	 * {128,192,256}.
540 	 */
541 	if (ctx->key_length == AES_KEYSIZE_128)
542 		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
543 	else if (ctx->key_length == AES_KEYSIZE_192)
544 		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
545 	else
546 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
547 }
548 #endif
549 
550 static int ctr_crypt(struct skcipher_request *req)
551 {
552 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
553 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
554 	struct skcipher_walk walk;
555 	unsigned int nbytes;
556 	int err;
557 
558 	err = skcipher_walk_virt(&walk, req, true);
559 
560 	kernel_fpu_begin();
561 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
562 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
563 			              nbytes & AES_BLOCK_MASK, walk.iv);
564 		nbytes &= AES_BLOCK_SIZE - 1;
565 		err = skcipher_walk_done(&walk, nbytes);
566 	}
567 	if (walk.nbytes) {
568 		ctr_crypt_final(ctx, &walk);
569 		err = skcipher_walk_done(&walk, 0);
570 	}
571 	kernel_fpu_end();
572 
573 	return err;
574 }
575 
576 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
577 			    unsigned int keylen)
578 {
579 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
580 	int err;
581 
582 	err = xts_verify_key(tfm, key, keylen);
583 	if (err)
584 		return err;
585 
586 	keylen /= 2;
587 
588 	/* first half of xts-key is for crypt */
589 	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
590 				 key, keylen);
591 	if (err)
592 		return err;
593 
594 	/* second half of xts-key is for tweak */
595 	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
596 				  key + keylen, keylen);
597 }
598 
599 
600 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
601 {
602 	aesni_enc(ctx, out, in);
603 }
604 
605 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
606 {
607 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
608 }
609 
610 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
611 {
612 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
613 }
614 
615 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
616 {
617 	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
618 }
619 
620 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
621 {
622 	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
623 }
624 
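/*
 * Dispatch tables for the XTS glue code: process eight blocks at a time via
 * aesni_xts_crypt8() where possible and fall back to single-block calls for
 * the tail.  With fpu_blocks_limit == 1 the glue helper takes the FPU
 * whenever there is at least one full block to process.
 */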
625 static const struct common_glue_ctx aesni_enc_xts = {
626 	.num_funcs = 2,
627 	.fpu_blocks_limit = 1,
628 
629 	.funcs = { {
630 		.num_blocks = 8,
631 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
632 	}, {
633 		.num_blocks = 1,
634 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
635 	} }
636 };
637 
638 static const struct common_glue_ctx aesni_dec_xts = {
639 	.num_funcs = 2,
640 	.fpu_blocks_limit = 1,
641 
642 	.funcs = { {
643 		.num_blocks = 8,
644 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
645 	}, {
646 		.num_blocks = 1,
647 		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
648 	} }
649 };
650 
651 static int xts_encrypt(struct skcipher_request *req)
652 {
653 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
654 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
655 
656 	return glue_xts_req_128bit(&aesni_enc_xts, req,
657 				   XTS_TWEAK_CAST(aesni_xts_tweak),
658 				   aes_ctx(ctx->raw_tweak_ctx),
659 				   aes_ctx(ctx->raw_crypt_ctx));
660 }
661 
662 static int xts_decrypt(struct skcipher_request *req)
663 {
664 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
665 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
666 
667 	return glue_xts_req_128bit(&aesni_dec_xts, req,
668 				   XTS_TWEAK_CAST(aesni_xts_tweak),
669 				   aes_ctx(ctx->raw_tweak_ctx),
670 				   aes_ctx(ctx->raw_crypt_ctx));
671 }
672 
673 static int rfc4106_init(struct crypto_aead *aead)
674 {
675 	struct cryptd_aead *cryptd_tfm;
676 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
677 
678 	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
679 				       CRYPTO_ALG_INTERNAL,
680 				       CRYPTO_ALG_INTERNAL);
681 	if (IS_ERR(cryptd_tfm))
682 		return PTR_ERR(cryptd_tfm);
683 
684 	*ctx = cryptd_tfm;
685 	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
686 	return 0;
687 }
688 
689 static void rfc4106_exit(struct crypto_aead *aead)
690 {
691 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
692 
693 	cryptd_free_aead(*ctx);
694 }
695 
696 static int
697 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
698 {
699 	struct crypto_cipher *tfm;
700 	int ret;
701 
702 	tfm = crypto_alloc_cipher("aes", 0, 0);
703 	if (IS_ERR(tfm))
704 		return PTR_ERR(tfm);
705 
706 	ret = crypto_cipher_setkey(tfm, key, key_len);
707 	if (ret)
708 		goto out_free_cipher;
709 
710 	/* Clear the data in the hash sub key container to zero. */
711 	/* We want to cipher all zeros to create the hash sub key. */
712 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
713 
714 	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
715 
716 out_free_cipher:
717 	crypto_free_cipher(tfm);
718 	return ret;
719 }
720 
721 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
722 				  unsigned int key_len)
723 {
724 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
725 
726 	if (key_len < 4) {
727 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
728 		return -EINVAL;
729 	}
730 	/*Account for 4 byte nonce at the end.*/
731 	key_len -= 4;
732 
733 	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
734 
735 	return aes_set_key_common(crypto_aead_tfm(aead),
736 				  &ctx->aes_key_expanded, key, key_len) ?:
737 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
738 }
739 
740 static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
741 				  unsigned int key_len)
742 {
743 	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
744 	struct cryptd_aead *cryptd_tfm = *ctx;
745 
746 	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
747 }
748 
749 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
750 				       unsigned int authsize)
751 {
752 	switch (authsize) {
753 	case 8:
754 	case 12:
755 	case 16:
756 		break;
757 	default:
758 		return -EINVAL;
759 	}
760 
761 	return 0;
762 }
763 
764 /* This is the Integrity Check Value (aka the authentication tag) length and
765  * can be 8, 12 or 16 bytes long. */
766 static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
767 				       unsigned int authsize)
768 {
769 	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
770 	struct cryptd_aead *cryptd_tfm = *ctx;
771 
772 	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
773 }
774 
775 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
776 				       unsigned int authsize)
777 {
778 	switch (authsize) {
779 	case 4:
780 	case 8:
781 	case 12:
782 	case 13:
783 	case 14:
784 	case 15:
785 	case 16:
786 		break;
787 	default:
788 		return -EINVAL;
789 	}
790 
791 	return 0;
792 }
793 
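/*
 * Generic scatter/gather GCM path: linearize the AAD (in place if it already
 * sits in one lowmem page, otherwise into a heap buffer), then walk src/dst
 * with the scatterwalk API and feed each mapped chunk to the
 * init/update/finalize primitives.  On encryption the computed tag is
 * appended to the destination; on decryption it is compared against the tag
 * at the end of the source and -EBADMSG is returned on mismatch.
 */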
794 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
795 			      unsigned int assoclen, u8 *hash_subkey,
796 			      u8 *iv, void *aes_ctx)
797 {
798 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
799 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
800 	struct gcm_context_data data AESNI_ALIGN_ATTR;
801 	struct scatter_walk dst_sg_walk = {};
802 	unsigned long left = req->cryptlen;
803 	unsigned long len, srclen, dstlen;
804 	struct scatter_walk assoc_sg_walk;
805 	struct scatter_walk src_sg_walk;
806 	struct scatterlist src_start[2];
807 	struct scatterlist dst_start[2];
808 	struct scatterlist *src_sg;
809 	struct scatterlist *dst_sg;
810 	u8 *src, *dst, *assoc;
811 	u8 *assocmem = NULL;
812 	u8 authTag[16];
813 
814 	if (!enc)
815 		left -= auth_tag_len;
816 
817 	/* Linearize assoc, if not already linear */
818 	if (req->src->length >= assoclen && req->src->length &&
819 		(!PageHighMem(sg_page(req->src)) ||
820 			req->src->offset + req->src->length <= PAGE_SIZE)) {
821 		scatterwalk_start(&assoc_sg_walk, req->src);
822 		assoc = scatterwalk_map(&assoc_sg_walk);
823 	} else {
824 		/* assoc can be any length, so must be on heap */
825 		assocmem = kmalloc(assoclen, GFP_ATOMIC);
826 		if (unlikely(!assocmem))
827 			return -ENOMEM;
828 		assoc = assocmem;
829 
830 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
831 	}
832 
833 	if (left) {
834 		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
835 		scatterwalk_start(&src_sg_walk, src_sg);
836 		if (req->src != req->dst) {
837 			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
838 						  req->assoclen);
839 			scatterwalk_start(&dst_sg_walk, dst_sg);
840 		}
841 	}
842 
843 	kernel_fpu_begin();
844 	aesni_gcm_init(aes_ctx, &data, iv,
845 		hash_subkey, assoc, assoclen);
846 	if (req->src != req->dst) {
847 		while (left) {
848 			src = scatterwalk_map(&src_sg_walk);
849 			dst = scatterwalk_map(&dst_sg_walk);
850 			srclen = scatterwalk_clamp(&src_sg_walk, left);
851 			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
852 			len = min(srclen, dstlen);
853 			if (len) {
854 				if (enc)
855 					aesni_gcm_enc_update(aes_ctx, &data,
856 							     dst, src, len);
857 				else
858 					aesni_gcm_dec_update(aes_ctx, &data,
859 							     dst, src, len);
860 			}
861 			left -= len;
862 
863 			scatterwalk_unmap(src);
864 			scatterwalk_unmap(dst);
865 			scatterwalk_advance(&src_sg_walk, len);
866 			scatterwalk_advance(&dst_sg_walk, len);
867 			scatterwalk_done(&src_sg_walk, 0, left);
868 			scatterwalk_done(&dst_sg_walk, 1, left);
869 		}
870 	} else {
871 		while (left) {
872 			dst = src = scatterwalk_map(&src_sg_walk);
873 			len = scatterwalk_clamp(&src_sg_walk, left);
874 			if (len) {
875 				if (enc)
876 					aesni_gcm_enc_update(aes_ctx, &data,
877 							     src, src, len);
878 				else
879 					aesni_gcm_dec_update(aes_ctx, &data,
880 							     src, src, len);
881 			}
882 			left -= len;
883 			scatterwalk_unmap(src);
884 			scatterwalk_advance(&src_sg_walk, len);
885 			scatterwalk_done(&src_sg_walk, 1, left);
886 		}
887 	}
888 	aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
889 	kernel_fpu_end();
890 
891 	if (!assocmem)
892 		scatterwalk_unmap(assoc);
893 	else
894 		kfree(assocmem);
895 
896 	if (!enc) {
897 		u8 authTagMsg[16];
898 
899 		/* Copy out original authTag */
900 		scatterwalk_map_and_copy(authTagMsg, req->src,
901 					 req->assoclen + req->cryptlen -
902 					 auth_tag_len,
903 					 auth_tag_len, 0);
904 
905 		/* Compare generated tag with passed in tag. */
906 		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
907 			-EBADMSG : 0;
908 	}
909 
910 	/* Copy in the authTag */
911 	scatterwalk_map_and_copy(authTag, req->dst,
912 				 req->assoclen + req->cryptlen,
913 				 auth_tag_len, 1);
914 
915 	return 0;
916 }
917 
918 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
919 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
920 {
921 	u8 one_entry_in_sg = 0;
922 	u8 *src, *dst, *assoc;
923 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
924 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
925 	struct scatter_walk src_sg_walk;
926 	struct scatter_walk dst_sg_walk = {};
927 	struct gcm_context_data data AESNI_ALIGN_ATTR;
928 
929 	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
930 		aesni_gcm_enc_tfm == aesni_gcm_enc ||
931 		req->cryptlen < AVX_GEN2_OPTSIZE) {
932 		return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
933 					  aes_ctx);
934 	}
935 	if (sg_is_last(req->src) &&
936 	    (!PageHighMem(sg_page(req->src)) ||
937 	    req->src->offset + req->src->length <= PAGE_SIZE) &&
938 	    sg_is_last(req->dst) &&
939 	    (!PageHighMem(sg_page(req->dst)) ||
940 	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
941 		one_entry_in_sg = 1;
942 		scatterwalk_start(&src_sg_walk, req->src);
943 		assoc = scatterwalk_map(&src_sg_walk);
944 		src = assoc + req->assoclen;
945 		dst = src;
946 		if (unlikely(req->src != req->dst)) {
947 			scatterwalk_start(&dst_sg_walk, req->dst);
948 			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
949 		}
950 	} else {
951 		/* Allocate memory for src, dst, assoc */
952 		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
953 			GFP_ATOMIC);
954 		if (unlikely(!assoc))
955 			return -ENOMEM;
956 		scatterwalk_map_and_copy(assoc, req->src, 0,
957 					 req->assoclen + req->cryptlen, 0);
958 		src = assoc + req->assoclen;
959 		dst = src;
960 	}
961 
962 	kernel_fpu_begin();
963 	aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
964 			  hash_subkey, assoc, assoclen,
965 			  dst + req->cryptlen, auth_tag_len);
966 	kernel_fpu_end();
967 
968 	/* The authTag (aka the Integrity Check Value) needs to be written
969 	 * back to the packet. */
970 	if (one_entry_in_sg) {
971 		if (unlikely(req->src != req->dst)) {
972 			scatterwalk_unmap(dst - req->assoclen);
973 			scatterwalk_advance(&dst_sg_walk, req->dst->length);
974 			scatterwalk_done(&dst_sg_walk, 1, 0);
975 		}
976 		scatterwalk_unmap(assoc);
977 		scatterwalk_advance(&src_sg_walk, req->src->length);
978 		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
979 	} else {
980 		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
981 					 req->cryptlen + auth_tag_len, 1);
982 		kfree(assoc);
983 	}
984 	return 0;
985 }
986 
987 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
988 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
989 {
990 	u8 one_entry_in_sg = 0;
991 	u8 *src, *dst, *assoc;
992 	unsigned long tempCipherLen = 0;
993 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
994 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
995 	u8 authTag[16];
996 	struct scatter_walk src_sg_walk;
997 	struct scatter_walk dst_sg_walk = {};
998 	struct gcm_context_data data AESNI_ALIGN_ATTR;
999 	int retval = 0;
1000 
1001 	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
1002 		aesni_gcm_enc_tfm == aesni_gcm_enc ||
1003 		req->cryptlen < AVX_GEN2_OPTSIZE) {
1004 		return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
1005 					  aes_ctx);
1006 	}
1007 	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1008 
1009 	if (sg_is_last(req->src) &&
1010 	    (!PageHighMem(sg_page(req->src)) ||
1011 	    req->src->offset + req->src->length <= PAGE_SIZE) &&
1012 	    sg_is_last(req->dst) && req->dst->length &&
1013 	    (!PageHighMem(sg_page(req->dst)) ||
1014 	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
1015 		one_entry_in_sg = 1;
1016 		scatterwalk_start(&src_sg_walk, req->src);
1017 		assoc = scatterwalk_map(&src_sg_walk);
1018 		src = assoc + req->assoclen;
1019 		dst = src;
1020 		if (unlikely(req->src != req->dst)) {
1021 			scatterwalk_start(&dst_sg_walk, req->dst);
1022 			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
1023 		}
1024 	} else {
1025 		/* Allocate memory for src, dst, assoc */
1026 		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1027 		if (!assoc)
1028 			return -ENOMEM;
1029 		scatterwalk_map_and_copy(assoc, req->src, 0,
1030 					 req->assoclen + req->cryptlen, 0);
1031 		src = assoc + req->assoclen;
1032 		dst = src;
1033 	}
1034 
1035 
1036 	kernel_fpu_begin();
1037 	aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
1038 			  hash_subkey, assoc, assoclen,
1039 			  authTag, auth_tag_len);
1040 	kernel_fpu_end();
1041 
1042 	/* Compare generated tag with passed in tag. */
1043 	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1044 		-EBADMSG : 0;
1045 
1046 	if (one_entry_in_sg) {
1047 		if (unlikely(req->src != req->dst)) {
1048 			scatterwalk_unmap(dst - req->assoclen);
1049 			scatterwalk_advance(&dst_sg_walk, req->dst->length);
1050 			scatterwalk_done(&dst_sg_walk, 1, 0);
1051 		}
1052 		scatterwalk_unmap(assoc);
1053 		scatterwalk_advance(&src_sg_walk, req->src->length);
1054 		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1055 	} else {
1056 		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1057 					 tempCipherLen, 1);
1058 		kfree(assoc);
1059 	}
1060 	return retval;
1061 
1062 }
1063 
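/*
 * RFC4106 (GCM-ESP) builds the 16 byte pre-counter block from the 4 byte
 * salt saved at setkey time, the 8 byte explicit IV carried in the request,
 * and a 32-bit block counter initialised to 1.  The explicit IV is also the
 * last 8 bytes of the associated data, which is why assoclen is reduced by
 * 8 before calling into the common GCM helpers.
 */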
1064 static int helper_rfc4106_encrypt(struct aead_request *req)
1065 {
1066 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1067 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1068 	void *aes_ctx = &(ctx->aes_key_expanded);
1069 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1070 	unsigned int i;
1071 	__be32 counter = cpu_to_be32(1);
1072 
1073 	/* Assuming we are supporting rfc4106 64-bit extended */
1074 	/* sequence numbers, we need the AAD length to be equal */
1075 	/* to 16 or 20 bytes. */
1076 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1077 		return -EINVAL;
1078 
1079 	/* The IV is built below. */
1080 	for (i = 0; i < 4; i++)
1081 		*(iv+i) = ctx->nonce[i];
1082 	for (i = 0; i < 8; i++)
1083 		*(iv+4+i) = req->iv[i];
1084 	*((__be32 *)(iv+12)) = counter;
1085 
1086 	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
1087 			      aes_ctx);
1088 }
1089 
1090 static int helper_rfc4106_decrypt(struct aead_request *req)
1091 {
1092 	__be32 counter = cpu_to_be32(1);
1093 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1094 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1095 	void *aes_ctx = &(ctx->aes_key_expanded);
1096 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1097 	unsigned int i;
1098 
1099 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1100 		return -EINVAL;
1101 
1102 	/* Assuming we are supporting rfc4106 64-bit extended */
1103 	/* sequence numbers, we need the AAD length to be */
1104 	/* equal to 16 or 20 bytes. */
1105 
1106 	/* The IV is built below. */
1107 	for (i = 0; i < 4; i++)
1108 		*(iv+i) = ctx->nonce[i];
1109 	for (i = 0; i < 8; i++)
1110 		*(iv+4+i) = req->iv[i];
1111 	*((__be32 *)(iv+12)) = counter;
1112 
1113 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
1114 			      aes_ctx);
1115 }
1116 
1117 static int gcmaes_wrapper_encrypt(struct aead_request *req)
1118 {
1119 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1120 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1121 	struct cryptd_aead *cryptd_tfm = *ctx;
1122 
1123 	tfm = &cryptd_tfm->base;
1124 	if (irq_fpu_usable() && (!in_atomic() ||
1125 				 !cryptd_aead_queued(cryptd_tfm)))
1126 		tfm = cryptd_aead_child(cryptd_tfm);
1127 
1128 	aead_request_set_tfm(req, tfm);
1129 
1130 	return crypto_aead_encrypt(req);
1131 }
1132 
1133 static int gcmaes_wrapper_decrypt(struct aead_request *req)
1134 {
1135 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1136 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1137 	struct cryptd_aead *cryptd_tfm = *ctx;
1138 
1139 	tfm = &cryptd_tfm->base;
1140 	if (irq_fpu_usable() && (!in_atomic() ||
1141 				 !cryptd_aead_queued(cryptd_tfm)))
1142 		tfm = cryptd_aead_child(cryptd_tfm);
1143 
1144 	aead_request_set_tfm(req, tfm);
1145 
1146 	return crypto_aead_decrypt(req);
1147 }
1148 #endif
1149 
1150 static struct crypto_alg aesni_algs[] = { {
1151 	.cra_name		= "aes",
1152 	.cra_driver_name	= "aes-aesni",
1153 	.cra_priority		= 300,
1154 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
1155 	.cra_blocksize		= AES_BLOCK_SIZE,
1156 	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1157 	.cra_module		= THIS_MODULE,
1158 	.cra_u	= {
1159 		.cipher	= {
1160 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1161 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1162 			.cia_setkey		= aes_set_key,
1163 			.cia_encrypt		= aes_encrypt,
1164 			.cia_decrypt		= aes_decrypt
1165 		}
1166 	}
1167 }, {
1168 	.cra_name		= "__aes",
1169 	.cra_driver_name	= "__aes-aesni",
1170 	.cra_priority		= 300,
1171 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1172 	.cra_blocksize		= AES_BLOCK_SIZE,
1173 	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1174 	.cra_module		= THIS_MODULE,
1175 	.cra_u	= {
1176 		.cipher	= {
1177 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
1178 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
1179 			.cia_setkey		= aes_set_key,
1180 			.cia_encrypt		= __aes_encrypt,
1181 			.cia_decrypt		= __aes_decrypt
1182 		}
1183 	}
1184 } };
1185 
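/*
 * The "__" prefixed skciphers below are CRYPTO_ALG_INTERNAL and are not
 * meant to be used directly.  aesni_init() wraps each of them in a SIMD
 * helper (simd_skcipher_create_compat()) so that users get an async,
 * cryptd-backed front end that is safe to call when the FPU is unavailable.
 */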
1186 static struct skcipher_alg aesni_skciphers[] = {
1187 	{
1188 		.base = {
1189 			.cra_name		= "__ecb(aes)",
1190 			.cra_driver_name	= "__ecb-aes-aesni",
1191 			.cra_priority		= 400,
1192 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1193 			.cra_blocksize		= AES_BLOCK_SIZE,
1194 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1195 			.cra_module		= THIS_MODULE,
1196 		},
1197 		.min_keysize	= AES_MIN_KEY_SIZE,
1198 		.max_keysize	= AES_MAX_KEY_SIZE,
1199 		.setkey		= aesni_skcipher_setkey,
1200 		.encrypt	= ecb_encrypt,
1201 		.decrypt	= ecb_decrypt,
1202 	}, {
1203 		.base = {
1204 			.cra_name		= "__cbc(aes)",
1205 			.cra_driver_name	= "__cbc-aes-aesni",
1206 			.cra_priority		= 400,
1207 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1208 			.cra_blocksize		= AES_BLOCK_SIZE,
1209 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1210 			.cra_module		= THIS_MODULE,
1211 		},
1212 		.min_keysize	= AES_MIN_KEY_SIZE,
1213 		.max_keysize	= AES_MAX_KEY_SIZE,
1214 		.ivsize		= AES_BLOCK_SIZE,
1215 		.setkey		= aesni_skcipher_setkey,
1216 		.encrypt	= cbc_encrypt,
1217 		.decrypt	= cbc_decrypt,
1218 #ifdef CONFIG_X86_64
1219 	}, {
1220 		.base = {
1221 			.cra_name		= "__ctr(aes)",
1222 			.cra_driver_name	= "__ctr-aes-aesni",
1223 			.cra_priority		= 400,
1224 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1225 			.cra_blocksize		= 1,
1226 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1227 			.cra_module		= THIS_MODULE,
1228 		},
1229 		.min_keysize	= AES_MIN_KEY_SIZE,
1230 		.max_keysize	= AES_MAX_KEY_SIZE,
1231 		.ivsize		= AES_BLOCK_SIZE,
1232 		.chunksize	= AES_BLOCK_SIZE,
1233 		.setkey		= aesni_skcipher_setkey,
1234 		.encrypt	= ctr_crypt,
1235 		.decrypt	= ctr_crypt,
1236 	}, {
1237 		.base = {
1238 			.cra_name		= "__xts(aes)",
1239 			.cra_driver_name	= "__xts-aes-aesni",
1240 			.cra_priority		= 401,
1241 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1242 			.cra_blocksize		= AES_BLOCK_SIZE,
1243 			.cra_ctxsize		= XTS_AES_CTX_SIZE,
1244 			.cra_module		= THIS_MODULE,
1245 		},
1246 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1247 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1248 		.ivsize		= AES_BLOCK_SIZE,
1249 		.setkey		= xts_aesni_setkey,
1250 		.encrypt	= xts_encrypt,
1251 		.decrypt	= xts_decrypt,
1252 #endif
1253 	}
1254 };
1255 
1256 static
1257 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1258 
1259 static struct {
1260 	const char *algname;
1261 	const char *drvname;
1262 	const char *basename;
1263 	struct simd_skcipher_alg *simd;
1264 } aesni_simd_skciphers2[] = {
1265 #if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
1266     IS_BUILTIN(CONFIG_CRYPTO_PCBC)
1267 	{
1268 		.algname	= "pcbc(aes)",
1269 		.drvname	= "pcbc-aes-aesni",
1270 		.basename	= "fpu(pcbc(__aes-aesni))",
1271 	},
1272 #endif
1273 };
1274 
1275 #ifdef CONFIG_X86_64
1276 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1277 				  unsigned int key_len)
1278 {
1279 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1280 
1281 	return aes_set_key_common(crypto_aead_tfm(aead),
1282 				  &ctx->aes_key_expanded, key, key_len) ?:
1283 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1284 }
1285 
1286 static int generic_gcmaes_encrypt(struct aead_request *req)
1287 {
1288 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1289 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1290 	void *aes_ctx = &(ctx->aes_key_expanded);
1291 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1292 	__be32 counter = cpu_to_be32(1);
1293 
1294 	memcpy(iv, req->iv, 12);
1295 	*((__be32 *)(iv+12)) = counter;
1296 
1297 	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1298 			      aes_ctx);
1299 }
1300 
1301 static int generic_gcmaes_decrypt(struct aead_request *req)
1302 {
1303 	__be32 counter = cpu_to_be32(1);
1304 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1305 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1306 	void *aes_ctx = &(ctx->aes_key_expanded);
1307 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1308 
1309 	memcpy(iv, req->iv, 12);
1310 	*((__be32 *)(iv+12)) = counter;
1311 
1312 	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1313 			      aes_ctx);
1314 }
1315 
1316 static int generic_gcmaes_init(struct crypto_aead *aead)
1317 {
1318 	struct cryptd_aead *cryptd_tfm;
1319 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1320 
1321 	cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
1322 				       CRYPTO_ALG_INTERNAL,
1323 				       CRYPTO_ALG_INTERNAL);
1324 	if (IS_ERR(cryptd_tfm))
1325 		return PTR_ERR(cryptd_tfm);
1326 
1327 	*ctx = cryptd_tfm;
1328 	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
1329 
1330 	return 0;
1331 }
1332 
1333 static void generic_gcmaes_exit(struct crypto_aead *aead)
1334 {
1335 	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1336 
1337 	cryptd_free_aead(*ctx);
1338 }
1339 
1340 static struct aead_alg aesni_aead_algs[] = { {
1341 	.setkey			= common_rfc4106_set_key,
1342 	.setauthsize		= common_rfc4106_set_authsize,
1343 	.encrypt		= helper_rfc4106_encrypt,
1344 	.decrypt		= helper_rfc4106_decrypt,
1345 	.ivsize			= GCM_RFC4106_IV_SIZE,
1346 	.maxauthsize		= 16,
1347 	.base = {
1348 		.cra_name		= "__gcm-aes-aesni",
1349 		.cra_driver_name	= "__driver-gcm-aes-aesni",
1350 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1351 		.cra_blocksize		= 1,
1352 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1353 		.cra_alignmask		= AESNI_ALIGN - 1,
1354 		.cra_module		= THIS_MODULE,
1355 	},
1356 }, {
1357 	.init			= rfc4106_init,
1358 	.exit			= rfc4106_exit,
1359 	.setkey			= gcmaes_wrapper_set_key,
1360 	.setauthsize		= gcmaes_wrapper_set_authsize,
1361 	.encrypt		= gcmaes_wrapper_encrypt,
1362 	.decrypt		= gcmaes_wrapper_decrypt,
1363 	.ivsize			= GCM_RFC4106_IV_SIZE,
1364 	.maxauthsize		= 16,
1365 	.base = {
1366 		.cra_name		= "rfc4106(gcm(aes))",
1367 		.cra_driver_name	= "rfc4106-gcm-aesni",
1368 		.cra_priority		= 400,
1369 		.cra_flags		= CRYPTO_ALG_ASYNC,
1370 		.cra_blocksize		= 1,
1371 		.cra_ctxsize		= sizeof(struct cryptd_aead *),
1372 		.cra_module		= THIS_MODULE,
1373 	},
1374 }, {
1375 	.setkey			= generic_gcmaes_set_key,
1376 	.setauthsize		= generic_gcmaes_set_authsize,
1377 	.encrypt		= generic_gcmaes_encrypt,
1378 	.decrypt		= generic_gcmaes_decrypt,
1379 	.ivsize			= GCM_AES_IV_SIZE,
1380 	.maxauthsize		= 16,
1381 	.base = {
1382 		.cra_name		= "__generic-gcm-aes-aesni",
1383 		.cra_driver_name	= "__driver-generic-gcm-aes-aesni",
1384 		.cra_priority		= 0,
1385 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1386 		.cra_blocksize		= 1,
1387 		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1388 		.cra_alignmask		= AESNI_ALIGN - 1,
1389 		.cra_module		= THIS_MODULE,
1390 	},
1391 }, {
1392 	.init			= generic_gcmaes_init,
1393 	.exit			= generic_gcmaes_exit,
1394 	.setkey			= gcmaes_wrapper_set_key,
1395 	.setauthsize		= gcmaes_wrapper_set_authsize,
1396 	.encrypt		= gcmaes_wrapper_encrypt,
1397 	.decrypt		= gcmaes_wrapper_decrypt,
1398 	.ivsize			= GCM_AES_IV_SIZE,
1399 	.maxauthsize		= 16,
1400 	.base = {
1401 		.cra_name		= "gcm(aes)",
1402 		.cra_driver_name	= "generic-gcm-aesni",
1403 		.cra_priority		= 400,
1404 		.cra_flags		= CRYPTO_ALG_ASYNC,
1405 		.cra_blocksize		= 1,
1406 		.cra_ctxsize		= sizeof(struct cryptd_aead *),
1407 		.cra_module		= THIS_MODULE,
1408 	},
1409 } };
1410 #else
1411 static struct aead_alg aesni_aead_algs[0];
1412 #endif
1413 
1414 
1415 static const struct x86_cpu_id aesni_cpu_id[] = {
1416 	X86_FEATURE_MATCH(X86_FEATURE_AES),
1417 	{}
1418 };
1419 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1420 
1421 static void aesni_free_simds(void)
1422 {
1423 	int i;
1424 
1425 	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
1426 		    aesni_simd_skciphers[i]; i++)
1427 		simd_skcipher_free(aesni_simd_skciphers[i]);
1428 
1429 	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
1430 		if (aesni_simd_skciphers2[i].simd)
1431 			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
1432 }
1433 
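/*
 * Module init: pick the best GCM and CTR implementations for the running
 * CPU (AVX2, AVX or plain SSE), then register the bare cipher, the internal
 * skciphers and the AEADs, and finally create the SIMD wrappers for the
 * internal skciphers.  Failures unwind the registrations in reverse order.
 */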
1434 static int __init aesni_init(void)
1435 {
1436 	struct simd_skcipher_alg *simd;
1437 	const char *basename;
1438 	const char *algname;
1439 	const char *drvname;
1440 	int err;
1441 	int i;
1442 
1443 	if (!x86_match_cpu(aesni_cpu_id))
1444 		return -ENODEV;
1445 #ifdef CONFIG_X86_64
1446 #ifdef CONFIG_AS_AVX2
1447 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1448 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1449 		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1450 		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1451 	} else
1452 #endif
1453 #ifdef CONFIG_AS_AVX
1454 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1455 		pr_info("AVX version of gcm_enc/dec engaged.\n");
1456 		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1457 		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1458 	} else
1459 #endif
1460 	{
1461 		pr_info("SSE version of gcm_enc/dec engaged.\n");
1462 		aesni_gcm_enc_tfm = aesni_gcm_enc;
1463 		aesni_gcm_dec_tfm = aesni_gcm_dec;
1464 	}
1465 	aesni_ctr_enc_tfm = aesni_ctr_enc;
1466 #ifdef CONFIG_AS_AVX
1467 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1468 		/* optimize performance of ctr mode encryption transform */
1469 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1470 		pr_info("AES CTR mode by8 optimization enabled\n");
1471 	}
1472 #endif
1473 #endif
1474 
1475 	err = crypto_fpu_init();
1476 	if (err)
1477 		return err;
1478 
1479 	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1480 	if (err)
1481 		goto fpu_exit;
1482 
1483 	err = crypto_register_skciphers(aesni_skciphers,
1484 					ARRAY_SIZE(aesni_skciphers));
1485 	if (err)
1486 		goto unregister_algs;
1487 
1488 	err = crypto_register_aeads(aesni_aead_algs,
1489 				    ARRAY_SIZE(aesni_aead_algs));
1490 	if (err)
1491 		goto unregister_skciphers;
1492 
1493 	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
1494 		algname = aesni_skciphers[i].base.cra_name + 2;
1495 		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
1496 		basename = aesni_skciphers[i].base.cra_driver_name;
1497 		simd = simd_skcipher_create_compat(algname, drvname, basename);
1498 		err = PTR_ERR(simd);
1499 		if (IS_ERR(simd))
1500 			goto unregister_simds;
1501 
1502 		aesni_simd_skciphers[i] = simd;
1503 	}
1504 
1505 	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
1506 		algname = aesni_simd_skciphers2[i].algname;
1507 		drvname = aesni_simd_skciphers2[i].drvname;
1508 		basename = aesni_simd_skciphers2[i].basename;
1509 		simd = simd_skcipher_create_compat(algname, drvname, basename);
1510 		err = PTR_ERR(simd);
1511 		if (IS_ERR(simd))
1512 			continue;
1513 
1514 		aesni_simd_skciphers2[i].simd = simd;
1515 	}
1516 
1517 	return 0;
1518 
1519 unregister_simds:
1520 	aesni_free_simds();
1521 	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1522 unregister_skciphers:
1523 	crypto_unregister_skciphers(aesni_skciphers,
1524 				    ARRAY_SIZE(aesni_skciphers));
1525 unregister_algs:
1526 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1527 fpu_exit:
1528 	crypto_fpu_exit();
1529 	return err;
1530 }
1531 
1532 static void __exit aesni_exit(void)
1533 {
1534 	aesni_free_simds();
1535 	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1536 	crypto_unregister_skciphers(aesni_skciphers,
1537 				    ARRAY_SIZE(aesni_skciphers));
1538 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1539 
1540 	crypto_fpu_exit();
1541 }
1542 
1543 late_initcall(aesni_init);
1544 module_exit(aesni_exit);
1545 
1546 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1547 MODULE_LICENSE("GPL");
1548 MODULE_ALIAS_CRYPTO("aes");
1549