/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

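/*
 * Typical use of this walk API by a cipher mode implementation, shown as an
 * illustrative sketch only (the real callers live in files such as
 * crypto/cbc.c; bsize here stands for the cipher block size):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		(process walk.src.virt.addr into walk.dst.virt.addr,
 *		 whole blocks at a time)
 *		err = blkcipher_walk_done(desc, &walk, nbytes & (bsize - 1));
 *	}
 *	return err;
 */
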
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

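/*
 * Per-walk state flags (a brief summary of how they are used below):
 * PHYS means the caller asked for page/offset pairs rather than mapped
 * virtual addresses, SLOW means the current block is bounced through an
 * allocated buffer because it straddles a scatterlist entry, COPY means
 * the chunk is bounced through walk->page to satisfy alignment, and DIFF
 * means source and destination are mapped separately.
 */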
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

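/*
 * Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result out to the destination scatterlist.
 */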
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

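/*
 * Fast/copy-path completion: flush the bounce page to the destination if
 * the data was copied, drop any kernel mappings and advance both
 * scatterlist walks by the number of bytes processed.
 */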
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

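/*
 * Called by the cipher implementation after each chunk has been processed.
 * @err is the number of bytes of the current chunk that were left
 * unprocessed (normally less than one block), or a negative error code.
 * On success the walk advances and the next chunk is set up; once all data
 * has been consumed (or on error) the IV is copied back to desc->info if
 * it was bounced, and any temporary buffers are freed.
 */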
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

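/*
 * Slow path: the next block straddles a scatterlist entry and cannot be
 * mapped as one linear region.  Allocate (or reuse) an aligned bounce
 * buffer, gather the input block into it and point both src and dst at
 * buffer space; blkcipher_done_slow() scatters the result back out.
 */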
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

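/*
 * Copy path: source or destination is not sufficiently aligned, so bounce
 * the chunk through the preallocated walk->page and process it in place
 * there; blkcipher_done_fast() writes the result back afterwards.
 */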
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

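/*
 * Fast path: the chunk is contiguous and properly aligned.  Record the
 * page/offset pairs and, for virtual walks, map source and destination
 * directly; when they are distinct both are mapped and BLKCIPHER_WALK_DIFF
 * is set, otherwise the operation runs in place.
 */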
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

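/*
 * Set up the next chunk of the walk: clamp it to what is contiguous in
 * both scatterlists and pick the fast, copy or slow path depending on
 * alignment and on whether a whole block is available in one piece.
 */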
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

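/*
 * The caller's IV is not aligned to the algorithm's alignmask, so copy it
 * into a correctly aligned slot carved out of walk->buffer; the final
 * blkcipher_walk_done() copies it back to desc->info.
 */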
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

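/*
 * Begin a walk that yields kernel virtual addresses, the common case for
 * software implementations.  blkcipher_walk_phys() below is the variant
 * that yields page/offset pairs instead.
 */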
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

/*
 * This function allows ablkcipher algorithms to use the blkcipher_walk API to
 * walk over their data.  The specified crypto_ablkcipher tfm is used to
 * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in
 * desc->tfm is never used so it can be left NULL.  (Yes, this design is ugly,
 * but it parallels blkcipher_aead_walk_virt_block() above.  In the 4.10 kernel
 * this is starting to be cleaned up...)
 */
int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_ablkcipher *tfm)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_ablkcipher_ivsize(tfm);
	walk->alignmask = crypto_ablkcipher_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt);

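/*
 * The key supplied by the caller is not aligned to the algorithm's
 * alignmask: bounce it through a temporary aligned buffer and wipe that
 * buffer before freeing it.
 */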
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

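/*
 * For tfms requested through the synchronous interface an IV slot is
 * reserved directly after the (aligned) context; asynchronous users
 * supply the IV with each request instead.
 */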
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

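/*
 * Construct a template instance that wraps a blkcipher/ablkcipher with an
 * IV generator (geniv).  The underlying algorithm's parameters are copied
 * into balg first, so the rest of the function does not care which of the
 * two interfaces the algorithm implements.
 */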
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");