/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

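/*
 * Walk state flags.  ABLKCIPHER_WALK_SLOW marks a step that was bounced
 * through a kmalloc'd buffer (see ablkcipher_next_slow()); the buffered
 * output is written back to the destination scatterlist by
 * __ablkcipher_walk_complete().
 */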
enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
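
/*
 * Worked example (assuming 4 KiB pages): with start == 0x1ff8 and
 * len == 16, the last byte would land at 0x2007, so end_page == 0x2000
 * and the spot is moved up to the start of the second page.  With
 * start == 0x2100 and len == 16, the region fits within one page and
 * start is returned unchanged.
 */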

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

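/*
 * ablkcipher_walk_next() chooses between the two strategies above: the
 * fast path maps the current scatterlist entries in place when both
 * sides are suitably aligned and a whole block is contiguous within the
 * page; otherwise the block is bounced through a temporary buffer by
 * ablkcipher_next_slow().
 */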
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
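
/*
 * Typical driver-side loop (an illustrative sketch, not code from this
 * file): ablkcipher_walk_init() comes from the internal skcipher header,
 * and process_chunk() stands in for a hypothetical hardware helper.  The
 * last argument of ablkcipher_walk_done() is the number of bytes left
 * unprocessed in this step, normally 0.  Error handling is elided:
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *
 *	while (walk.nbytes) {
 *		process_chunk(walk.src.page, walk.src.offset,
 *			      walk.dst.page, walk.dst.offset, walk.nbytes);
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */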

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
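
/*
 * Example of the over-allocation above (hypothetical numbers): with
 * alignmask == 15 and keylen == 32, absize is 47 bytes, which is enough
 * to hold a copy of the key at a 16-byte boundary wherever kmalloc()
 * places the buffer, since at most 15 bytes are lost to the ALIGN()
 * adjustment.
 */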

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return "eseqiv";
}
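
/*
 * For illustration: a mode such as cbc(aes), whose IV size equals its
 * block size (16 bytes), defaults to "eseqiv", while ctr(aes), which
 * has a block size of 1 but a 16-byte IV, falls back to "chainiv".
 */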

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	if (tmpl->create) {
		err = tmpl->create(tmpl, tb);
		if (err)
			goto put_tmpl;
		goto ok;
	}

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

ok:
	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
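
/*
 * Allocation sketch (illustrative only; my_complete(), my_data, the
 * scatterlists and the key/IV buffers are hypothetical, and error
 * handling is abbreviated):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_data);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	(-EINPROGRESS or -EBUSY mean completion arrives via my_complete())
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */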