/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
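
/*
 * Usage sketch (illustrative, not part of the original file): a synchronous
 * cipher mode built on this walker typically drives blkcipher_walk_virt()
 * and blkcipher_walk_done() in a loop inside its encrypt()/decrypt() handler
 * (desc, dst, src and nbytes are that handler's parameters).  Here "bsize"
 * is the cipher block size and process_segment() is a hypothetical helper
 * standing in for the mode's per-segment work:
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		process_segment(walk.src.virt.addr, walk.dst.virt.addr,
 *				nbytes - (nbytes % bsize));
 *		err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *	}
 *	return err;
 */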

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
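
/*
 * Worked example for blkcipher_get_spot() above (illustrative, assuming
 * PAGE_SIZE == 4096): with start == page + 4090 and len == 16,
 * start + len - 1 falls in the next page, so end_page is that next page
 * boundary and max() returns it; the spot then begins exactly at the
 * boundary and the len bytes no longer straddle two pages.  If
 * start + len - 1 stays within the same page, end_page <= start and
 * start is returned unchanged.
 */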

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");