// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

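/*
 * State for walking the scatterlists of a GCM request. Data is
 * processed in place whenever the current scatterlist entry covers
 * enough contiguous bytes; otherwise partial blocks are gathered in
 * buf until a full AES block is available.
 */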
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

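/*
 * Pass the key on to the software fallback cipher, mirroring the
 * request flags of the s390 tfm.
 */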
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

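/*
 * Single-block encryption: use the CPACF KM instruction when a
 * function code was selected at setkey time, the fallback otherwise.
 */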
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	crypto_aes_encrypt,
			.cia_decrypt		=	crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

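/*
 * Run the request on the software fallback skcipher. The subrequest
 * lives in the request context, which was sized for the fallback's
 * needs at init time, so no allocation happens here.
 */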
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

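/*
 * ECB: hand full blocks directly to the KM instruction; requests on
 * tfms without a usable function code go to the fallback skcipher.
 */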
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	ecb_aes_set_key,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

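/*
 * CBC: the KMC instruction takes a parameter block of chaining value
 * (IV) followed by the key, and updates the chaining value as it
 * runs; it is copied back to walk.iv after each chunk.
 */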
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_aes_set_key,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

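/*
 * XTS: derive the initial tweak with the PCC instruction, then run
 * full blocks through KM. Requests whose length is not a multiple of
 * the block size (ciphertext stealing) go to the fallback tfm.
 */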
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		=	"xts(aes)",
	.base.cra_driver_name	=	"xts-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	xts_fallback_init,
	.exit			=	xts_fallback_exit,
	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_aes_set_key,
	.encrypt		=	xts_aes_encrypt,
	.decrypt		=	xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

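/*
 * Fill ctrptr with successive counter values derived from iv, one per
 * block, so that KMCTR can process several blocks per invocation.
 * Returns the number of bytes covered, at most PAGE_SIZE.
 */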
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

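/*
 * CTR: try to take the shared counter-block page so that many blocks
 * can be handled per KMCTR invocation; if the mutex is contended,
 * fall back to one block at a time using walk.iv directly.
 */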
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		=	"ctr(aes)",
	.base.cra_driver_name	=	"ctr-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_aes_set_key,
	.encrypt		=	ctr_aes_crypt,
	.decrypt		=	ctr_aes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

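/*
 * Map the current scatterlist entry, stepping to the next entry when
 * the current one is exhausted. Returns the number of contiguous
 * bytes addressable through gw->walk_ptr, or 0 at the end of the
 * scatterlist.
 */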
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

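/*
 * Advance the input walk until at least minbytesneeded contiguous
 * bytes are available, either directly in the scatterlist or gathered
 * into gw->buf. On return gw->ptr and gw->nbytes describe the data;
 * gw->ptr is NULL once the input is exhausted.
 */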
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

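/*
 * Counterpart of gcm_in_walk_go() for the output: point gw->ptr at
 * the scatterlist when enough contiguous space is available, else
 * hand out gw->buf and copy it back in gcm_out_walk_done().
 */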
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

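/*
 * Consume bytesdone bytes from the input walk: shift any remaining
 * buffered bytes to the front of gw->buf, or advance the scatterlist
 * position.
 */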
static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

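/*
 * Register an algorithm only if CPACF implements at least one of its
 * function codes; key lengths the hardware lacks are handled by the
 * per-tfm fallback. The priorities set above let these
 * implementations take precedence over the generic ones.
 */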
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);