/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

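/*
 * One deferred slow-path write: ciphertext produced in the bounce
 * buffer at @data is copied out to the real destination @dst once the
 * request completes.
 */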
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

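/*
 * Write all queued slow-path buffers back to their real destinations
 * and free them.  Called once an asynchronous request has completed.
 */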
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

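/*
 * Slow-path step completion: the output still sits in its bounce
 * buffer, so only advance the output walk past the processed block,
 * crossing scatterlist entries as needed.
 */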
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

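/*
 * Complete the current walk step.  A non-negative @err is the number
 * of bytes the cipher left unprocessed; leftovers are only valid on
 * the fast path.  Either advances to the next step or, when the walk
 * is finished, copies the final IV back to req->info and frees the IV
 * bounce buffer.
 */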
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

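/*
 * Slow path: the current chunk is split across scatterlist entries or
 * misaligned.  Allocate a single buffer holding both the queue entry
 * and an aligned bounce block, copy the input into it, and queue it so
 * the output can be written back to the real destination later.
 */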
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

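/*
 * The IV supplied in the request is misaligned for this algorithm:
 * copy it into an aligned bounce buffer placed so that it does not
 * straddle a page.
 */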
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

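/*
 * Fast path: record the page/offset of the current, properly aligned
 * source and destination positions directly.
 */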
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

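/*
 * Set up the next walk step: take the fast path when the chunk is
 * aligned and contiguous, otherwise bounce it through
 * ablkcipher_next_slow() and report the bounce pages instead.
 */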
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

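/*
 * Begin a walk: refuse to run in hard IRQ context, bounce the IV if it
 * is misaligned, and position both scatterwalks at the start of the
 * request's source and destination scatterlists.
 */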
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

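/*
 * Walk the request's scatterlists, yielding page/offset pairs in
 * walk->src and walk->dst rather than mapped virtual addresses.
 */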
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

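/*
 * The caller's key buffer is misaligned for this algorithm: copy the
 * key into an aligned buffer, set it, then zero and free the copy.
 */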
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

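/*
 * For ciphers without an IV, "IV generation" degenerates to plain
 * encryption/decryption of the embedded request.
 */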
int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

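/*
 * Instantiate the ablkcipher ops for a transform.  Algorithms without
 * an IV get the no-op IV generators above.
 */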
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

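/* Report ablkcipher parameters to user space via the crypto netlink API. */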
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

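/*
 * Instantiate the ops for a cipher with a built-in IV generator.  A
 * geniv instance (CRYPTO_ALG_GENIV) supplies its own setkey.
 */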
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

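/*
 * Choose the default IV generator: chainiv when the IV size differs
 * from the block size, otherwise eseqiv for async algorithms and the
 * boot-time default for synchronous ones.
 */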
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
	       "eseqiv" : skcipher_default_geniv;
}

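/*
 * Wrap a plain (a)blkcipher in its default geniv template and register
 * the resulting instance.  Success is reported as -EAGAIN so that the
 * caller redoes the lookup and finds the new givcipher.
 */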
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

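/*
 * Look up an skcipher by name.  Givciphers and IV-less ciphers are
 * returned directly; a plain cipher that needs IV generation triggers
 * construction of its default givcipher instance first.
 */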
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

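/*
 * Resolve an skcipher by name and initialise a template instance's
 * spawn (reference) to it.
 */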
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

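/*
 * Allocate and return an ablkcipher transform.  -EAGAIN from the
 * lookup (e.g. while a default geniv instance is being registered)
 * causes a retry unless a signal is pending.
 */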
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);

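/*
 * Pick the boot-time default geniv for synchronous ciphers: eseqiv on
 * SMP, since chainiv derives each IV from the previous request and so
 * serialises requests; the simpler chainiv on UP.
 */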
static int __init skcipher_module_init(void)
{
	skcipher_default_geniv = num_possible_cpus() > 1 ?
				 "eseqiv" : "chainiv";
	return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);