// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017,2020
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *              Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE 16
#define PAES_MAX_KEYSIZE 320

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct key_blob {
        /*
         * Small keys will be stored in the keybuf. Larger keys are
         * stored in extra allocated memory. In both cases, key
         * points to the memory where the key is stored.
         * The code distinguishes the two cases by checking keylen
         * against sizeof(keybuf); see the two helper functions below.
         */
        u8 *key;
        u8 keybuf[128];
        unsigned int keylen;
};

static inline int _key_to_kb(struct key_blob *kb,
                             const u8 *key,
                             unsigned int keylen)
{
        struct clearkey_header {
                u8  type;
                u8  res0[3];
                u8  version;
                u8  res1[3];
                u32 keytype;
                u32 len;
        } __packed * h;

        switch (keylen) {
        case 16:
        case 24:
        case 32:
                /* clear key value, prepare pkey clear key token in keybuf */
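                /*
                 * Note: keytype below encodes the AES key size as
                 * (keylen - 8) >> 3, i.e. 1, 2 or 3 for 16, 24 or 32
                 * byte keys, matching PKEY_KEYTYPE_AES_128/192/256.
                 */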
                memset(kb->keybuf, 0, sizeof(kb->keybuf));
                h = (struct clearkey_header *) kb->keybuf;
                h->version = 0x02; /* TOKVER_CLEAR_KEY */
                h->keytype = (keylen - 8) >> 3;
                h->len = keylen;
                memcpy(kb->keybuf + sizeof(*h), key, keylen);
                kb->keylen = sizeof(*h) + keylen;
                kb->key = kb->keybuf;
                break;
        default:
                /* other key material, let pkey handle this */
                if (keylen <= sizeof(kb->keybuf))
                        kb->key = kb->keybuf;
                else {
                        kb->key = kmalloc(keylen, GFP_KERNEL);
                        if (!kb->key)
                                return -ENOMEM;
                }
                memcpy(kb->key, key, keylen);
                kb->keylen = keylen;
                break;
        }

        return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
        if (kb->key && kb->key != kb->keybuf
            && kb->keylen > sizeof(kb->keybuf)) {
                kfree(kb->key);
                kb->key = NULL;
        }
}

struct s390_paes_ctx {
        struct key_blob kb;
        struct pkey_protkey pk;
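        /* pk_lock protects the cached protected key pk against concurrent updates */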
        spinlock_t pk_lock;
        unsigned long fc;
};

struct s390_pxts_ctx {
        struct key_blob kb[2];
        struct pkey_protkey pk[2];
        spinlock_t pk_lock;
        unsigned long fc;
};

static inline int __paes_keyblob2pkey(struct key_blob *kb,
                                      struct pkey_protkey *pk)
{
        int i, ret;

        /*
         * Try the conversion up to three times: it may fail transiently,
         * for example while the wrapping key or the master key of an
         * assisting crypto adapter is being changed.
         */
        for (i = 0; i < 3; i++) {
                ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
                if (ret == 0)
                        break;
        }

        return ret;
}

static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
        struct pkey_protkey pkey;

        if (__paes_keyblob2pkey(&ctx->kb, &pkey))
                return -EINVAL;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(&ctx->pk, &pkey, sizeof(pkey));
        spin_unlock_bh(&ctx->pk_lock);

        return 0;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
        unsigned long fc;

        if (__paes_convert_key(ctx))
                return -EINVAL;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __ecb_paes_set_key(ctx);
}

static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret;
        struct {
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_km(ctx->fc | modifier, &param,
                             walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
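                /*
                 * A short return from cpacf_km (k < n) indicates that the
                 * protected key in the parameter block has become invalid,
                 * e.g. because the wrapping key changed. Derive a fresh
                 * protected key from the key blob and retry.
                 */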
                if (k < n) {
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
        return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
        return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
        .base.cra_name        = "ecb(paes)",
        .base.cra_driver_name = "ecb-paes-s390",
        .base.cra_priority    = 401,    /* combo: aes + ecb + 1 */
        .base.cra_blocksize   = AES_BLOCK_SIZE,
        .base.cra_ctxsize     = sizeof(struct s390_paes_ctx),
        .base.cra_module      = THIS_MODULE,
        .base.cra_list        = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
        .init                 = ecb_paes_init,
        .exit                 = ecb_paes_exit,
        .min_keysize          = PAES_MIN_KEYSIZE,
        .max_keysize          = PAES_MAX_KEYSIZE,
        .setkey               = ecb_paes_set_key,
        .encrypt              = ecb_paes_encrypt,
        .decrypt              = ecb_paes_decrypt,
};

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
        unsigned long fc;

        if (__paes_convert_key(ctx))
                return -EINVAL;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __cbc_paes_set_key(ctx);
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_kmc(ctx->fc | modifier, &param,
                              walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k) {
                        memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
                        ret = skcipher_walk_done(&walk, nbytes - k);
                }
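                /*
                 * As in ecb_paes_crypt: k < n means the protected key
                 * went stale mid-operation; refresh it and retry.
                 */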
                if (k < n) {
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
        return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
        return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
        .base.cra_name        = "cbc(paes)",
        .base.cra_driver_name = "cbc-paes-s390",
        .base.cra_priority    = 402,    /* ecb-paes-s390 + 1 */
        .base.cra_blocksize   = AES_BLOCK_SIZE,
        .base.cra_ctxsize     = sizeof(struct s390_paes_ctx),
        .base.cra_module      = THIS_MODULE,
        .base.cra_list        = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
        .init                 = cbc_paes_init,
        .exit                 = cbc_paes_exit,
        .min_keysize          = PAES_MIN_KEYSIZE,
        .max_keysize          = PAES_MAX_KEYSIZE,
        .ivsize               = AES_BLOCK_SIZE,
        .setkey               = cbc_paes_set_key,
        .encrypt              = cbc_paes_encrypt,
        .decrypt              = cbc_paes_decrypt,
};

static int xts_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb[0].key = NULL;
        ctx->kb[1].key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb[0]);
        _free_kb_keybuf(&ctx->kb[1]);
}

static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
        struct pkey_protkey pkey0, pkey1;

        if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
            __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
                return -EINVAL;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
        memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
        spin_unlock_bh(&ctx->pk_lock);

        return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
        unsigned long fc;

        if (__xts_paes_convert_key(ctx))
                return -EINVAL;

        if (ctx->pk[0].type != ctx->pk[1].type)
                return -EINVAL;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
                (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
                CPACF_KM_PXTS_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int xts_key_len)
{
        int rc;
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 ckey[2 * AES_MAX_KEY_SIZE];
        unsigned int ckey_len, key_len;

        if (xts_key_len % 2)
                return -EINVAL;

        key_len = xts_key_len / 2;

        _free_kb_keybuf(&ctx->kb[0]);
        _free_kb_keybuf(&ctx->kb[1]);
        rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
        if (rc)
                return rc;
        rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
        if (rc)
                return rc;

        rc = __xts_paes_set_key(ctx);
        if (rc)
                return rc;

        /*
         * xts_verify_key verifies the key length is not odd and makes
         * sure that the two keys are not the same. This can be done
         * on the two protected keys as well.
         */
        ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
                AES_KEYSIZE_128 : AES_KEYSIZE_256;
        memcpy(ckey, ctx->pk[0].protkey, ckey_len);
        memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
        return xts_verify_key(tfm, ckey, 2 * ckey_len);
}

static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int keylen, offset, nbytes, n, k;
        int ret;
        struct {
                u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
                u8 init[16];
        } xts_param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
        offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
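
        /*
         * The parameter blocks above are sized for the 256-bit case
         * (64-byte key field). For AES-128 the protected key is only
         * 48 bytes, so it is placed end-aligned at offset 16 and the
         * CPACF instructions are passed the address key + offset.
         * cpacf_pcc below computes the initial XTS tweak into
         * pcc_param.xts, which seeds xts_param.init for cpacf_km.
         */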

        memset(&pcc_param, 0, sizeof(pcc_param));
        memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
        spin_lock_bh(&ctx->pk_lock);
        memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
        memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
        spin_unlock_bh(&ctx->pk_lock);
        cpacf_pcc(ctx->fc, pcc_param.key + offset);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
                             walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
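                /*
                 * As in the other modes, a short return indicates a stale
                 * protected key; convert the key blobs again and retry.
                 */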
                if (k < n) {
                        if (__xts_paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(xts_param.key + offset,
                               ctx->pk[0].protkey, keylen);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }

        return ret;
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
        return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
        return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
        .base.cra_name        = "xts(paes)",
        .base.cra_driver_name = "xts-paes-s390",
        .base.cra_priority    = 402,    /* ecb-paes-s390 + 1 */
        .base.cra_blocksize   = AES_BLOCK_SIZE,
        .base.cra_ctxsize     = sizeof(struct s390_pxts_ctx),
        .base.cra_module      = THIS_MODULE,
        .base.cra_list        = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
        .init                 = xts_paes_init,
        .exit                 = xts_paes_exit,
        .min_keysize          = 2 * PAES_MIN_KEYSIZE,
        .max_keysize          = 2 * PAES_MAX_KEYSIZE,
        .ivsize               = AES_BLOCK_SIZE,
        .setkey               = xts_paes_set_key,
        .encrypt              = xts_paes_encrypt,
        .decrypt              = xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
        unsigned long fc;

        if (__paes_convert_key(ctx))
                return -EINVAL;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
                (ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
                CPACF_KMCTR_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __ctr_paes_set_key(ctx);
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
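        /*
         * Fill the buffer with consecutive counter values: each iteration
         * copies the previous counter block and increments it, so a single
         * kmctr invocation can process up to a page of data.
         */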
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}

static int ctr_paes_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret, locked;
        struct {
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

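        /*
         * The shared ctrblk page speeds up multi-block requests; if it
         * is busy (trylock fails), fall back to processing one block per
         * kmctr call using walk.iv as the counter.
         */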
        locked = mutex_trylock(&ctrblk_lock);

        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk.iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
                k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
                                walk.src.virt.addr, n, ctrptr);
                if (k) {
                        if (ctrptr == ctrblk)
                                memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(walk.iv, AES_BLOCK_SIZE);
                        ret = skcipher_walk_done(&walk, nbytes - k);
                }
                if (k < n) {
                        if (__paes_convert_key(ctx)) {
                                if (locked)
                                        mutex_unlock(&ctrblk_lock);
                                return skcipher_walk_done(&walk, -EIO);
                        }
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        if (locked)
                mutex_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
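        /*
         * The retry loop below mirrors the full-block path: kmctr
         * returning less than AES_BLOCK_SIZE means the protected key
         * went stale, so re-derive it and run the instruction again.
         */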
        if (nbytes) {
                while (1) {
                        if (cpacf_kmctr(ctx->fc, &param, buf,
                                        walk.src.virt.addr, AES_BLOCK_SIZE,
                                        walk.iv) == AES_BLOCK_SIZE)
                                break;
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
                memcpy(walk.dst.virt.addr, buf, nbytes);
                crypto_inc(walk.iv, AES_BLOCK_SIZE);
                ret = skcipher_walk_done(&walk, nbytes);
        }

        return ret;
}

static struct skcipher_alg ctr_paes_alg = {
        .base.cra_name        = "ctr(paes)",
        .base.cra_driver_name = "ctr-paes-s390",
        .base.cra_priority    = 402,    /* ecb-paes-s390 + 1 */
        .base.cra_blocksize   = 1,
        .base.cra_ctxsize     = sizeof(struct s390_paes_ctx),
        .base.cra_module      = THIS_MODULE,
        .base.cra_list        = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
        .init                 = ctr_paes_init,
        .exit                 = ctr_paes_exit,
        .min_keysize          = PAES_MIN_KEYSIZE,
        .max_keysize          = PAES_MAX_KEYSIZE,
        .ivsize               = AES_BLOCK_SIZE,
        .setkey               = ctr_paes_set_key,
        .encrypt              = ctr_paes_crypt,
        .decrypt              = ctr_paes_crypt,
        .chunksize            = AES_BLOCK_SIZE,
};

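/*
 * The algorithms are registered conditionally in paes_s390_init, so on
 * exit unregister only those that were actually registered: LIST_HEAD_INIT
 * above leaves cra_list empty, and crypto_register_skcipher links it in.
 */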
static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        if (!list_empty(&alg->base.cra_list))
                crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
        __crypto_unregister_skcipher(&ctr_paes_alg);
        __crypto_unregister_skcipher(&xts_paes_alg);
        __crypto_unregister_skcipher(&cbc_paes_alg);
        __crypto_unregister_skcipher(&ecb_paes_alg);
        if (ctrblk)
                free_page((unsigned long) ctrblk);
}

static int __init paes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC and KMCTR */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);

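        /*
         * Register each mode only if the machine offers at least one
         * matching protected-key CPACF function code.
         */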
        if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
                ret = crypto_register_skcipher(&ecb_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
                ret = crypto_register_skcipher(&cbc_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
                ret = crypto_register_skcipher(&xts_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = crypto_register_skcipher(&ctr_paes_alg);
                if (ret)
                        goto out_err;
        }

        return 0;
out_err:
        paes_s390_fini();
        return ret;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("paes");
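
/*
 * Usage sketch (hypothetical caller, not part of this module): a kernel
 * user allocates and keys the cipher through the generic crypto API,
 * passing a pkey key blob (secure key token or clear key) instead of a
 * raw AES key:
 *
 *      tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
 *      err = crypto_skcipher_setkey(tfm, key_blob, key_blob_len);
 */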

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");