/*
 *
 *  sep_crypto.c - Crypto interface structures
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14  Upgrade to Medfield
 *  2011.02.22  Enable Kernel Crypto
 *
 */

/* #define DEBUG */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)

/* Globals for queuing */
static spinlock_t queue_lock;
static struct crypto_queue sep_queue;

/* Forward declaration of the dequeuer */
static void sep_dequeuer(void *data);

/* TESTING */
/**
 *	sep_do_callback
 *	@work: pointer to work_struct
 *	This is what is called by the queue; it is generic so that it
 *	can be used by any type of operation as each different callback
 *	function can use the data parameter in its own way
 */
static void sep_do_callback(struct work_struct *work)
{
	struct sep_work_struct *sep_work = container_of(work,
		struct sep_work_struct, work);

	if (sep_work != NULL) {
		(sep_work->callback)(sep_work->data);
		kfree(sep_work);
	} else {
		pr_debug("sep crypto: do callback - NULL container\n");
	}
}

/**
 *	sep_submit_work
 *	@work_queue: pointer to struct_workqueue
 *	@funct: pointer to function to execute
 *	@data: pointer to data; function will know
 *		how to use it
 *	This is a generic API to submit something to
 *	the queue. The callback function will depend
 *	on what operation is to be done
 */
static int sep_submit_work(struct workqueue_struct *work_queue,
	void (*funct)(void *),
	void *data)
{
	struct sep_work_struct *sep_work;
	int result;

	sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);

	if (sep_work == NULL) {
		pr_debug("sep crypto: cannot allocate work structure\n");
		return -ENOMEM;
	}

	sep_work->callback = funct;
	sep_work->data = data;
	INIT_WORK(&sep_work->work, sep_do_callback);
	result = queue_work(work_queue, &sep_work->work);
	if (!result) {
		pr_debug("sep_crypto: queue_work failed\n");
		kfree(sep_work);	/* do not leak on failure */
		return -EINVAL;
	}
	return 0;
}
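
/*
 * Illustrative usage (not part of the original driver): the post-op
 * handlers below submit the dequeuer to the device workqueue in
 * exactly this way.
 *
 *	u32 err;
 *
 *	err = sep_submit_work(sep->workqueue, sep_dequeuer,
 *		(void *)&sep_queue);
 *	if (err)
 *		pr_debug("sep crypto: could not submit work\n");
 */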

/**
 *	sep_alloc_sg_buf -
 *	@sep: pointer to struct sep_device
 *	@size: total size of area
 *	@block_size: minimum size of chunks
 *	each page is minimum or modulo this size
 *	@returns: pointer to struct scatterlist for new
 *	buffer
 **/
static struct scatterlist *sep_alloc_sg_buf(
	struct sep_device *sep,
	size_t size,
	size_t block_size)
{
	u32 nbr_pages;
	u32 ct1;
	void *buf;
	size_t current_size;
	size_t real_page_size;

	struct scatterlist *sg, *sg_temp;

	if (size == 0)
		return NULL;

	dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");

	current_size = 0;
	nbr_pages = 0;
	real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
	/**
	 * The size of each page must be modulo of the operation
	 * block size; increment by the modified page size until
	 * the total size is reached, then you have the number of
	 * pages
	 */
	while (current_size < size) {
		current_size += real_page_size;
		nbr_pages += 1;
	}

	sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!sg)
		return NULL;

	sg_init_table(sg, nbr_pages);

	current_size = 0;
	sg_temp = sg;
	for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
		buf = (void *)get_zeroed_page(GFP_ATOMIC);
		if (!buf) {
			dev_warn(&sep->pdev->dev,
				"Cannot allocate page for new buffer\n");
			/* unwind the pages allocated so far */
			while (ct1--)
				free_page((unsigned long)sg_virt(&sg[ct1]));
			kfree(sg);
			return NULL;
		}

		sg_set_buf(sg_temp, buf, real_page_size);
		if ((size - current_size) > real_page_size) {
			sg_temp->length = real_page_size;
			current_size += real_page_size;
		} else {
			sg_temp->length = (size - current_size);
			current_size = size;
		}
		/* advance to the next entry, not back to the head */
		sg_temp = sg_next(sg_temp);
	}
	return sg;
}
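
/*
 * Worked example (illustrative): size = 10000 and block_size = 16 on a
 * 4K-page system. PAGE_SIZE % 16 == 0, so real_page_size is 4096 and
 * three pages are allocated; the first two entries carry 4096 bytes
 * each and the last carries the 1808-byte remainder, so every entry
 * except possibly the last is a whole multiple of the block size.
 */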

/**
 *	sep_free_sg_buf -
 *	@sg: pointer to struct scatterlist; points to area to free
 */
static void sep_free_sg_buf(struct scatterlist *sg)
{
	struct scatterlist *sg_temp = sg;

	while (sg_temp) {
		free_page((unsigned long)sg_virt(sg_temp));
		sg_temp = sg_next(sg_temp);
	}
	kfree(sg);
}

/**
 *	sep_copy_sg -
 *	@sep: pointer to struct sep_device
 *	@sg_src: pointer to struct scatterlist for source
 *	@sg_dst: pointer to struct scatterlist for destination
 *	@size: size (in bytes) of data to copy
 *
 *	Copy data from one scatterlist to another; both must
 *	be the same size
 */
static void sep_copy_sg(
	struct sep_device *sep,
	struct scatterlist *sg_src,
	struct scatterlist *sg_dst,
	size_t size)
{
	u32 seg_size;
	u32 in_offset, out_offset;

	u32 count = 0;
	struct scatterlist *sg_src_tmp = sg_src;
	struct scatterlist *sg_dst_tmp = sg_dst;

	in_offset = 0;
	out_offset = 0;

	dev_dbg(&sep->pdev->dev, "sep copy sg\n");

	if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
		return;

	dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");

	while (count < size) {
		if ((sg_src_tmp->length - in_offset) >
			(sg_dst_tmp->length - out_offset))
			seg_size = sg_dst_tmp->length - out_offset;
		else
			seg_size = sg_src_tmp->length - in_offset;

		/* clamp the segment to the bytes still outstanding */
		if (seg_size > (size - count))
			seg_size = (size - count);

		memcpy(sg_virt(sg_dst_tmp) + out_offset,
			sg_virt(sg_src_tmp) + in_offset,
			seg_size);

		in_offset += seg_size;
		out_offset += seg_size;
		count += seg_size;

		if (in_offset >= sg_src_tmp->length) {
			sg_src_tmp = sg_next(sg_src_tmp);
			in_offset = 0;
		}

		if (out_offset >= sg_dst_tmp->length) {
			sg_dst_tmp = sg_next(sg_dst_tmp);
			out_offset = 0;
		}
	}
}

/**
 *	sep_oddball_pages -
 *	@sep: pointer to struct sep_device
 *	@sg: pointer to struct scatterlist - buffer to check
 *	@data_size: total data size
 *	@block_size: minimum block size; pages must be multiples of this size
 *	@new_sg: pointer to location to put pointer to new sg area
 *	@do_copy: 1 means do copy, 0 means do not copy
 *	@returns: 1 if new scatterlist is needed; 0 if not needed;
 *		error value if operation failed
 *
 *	The SEP device requires all pages to be multiples of the
 *	minimum block size appropriate for the operation.
 *	This function checks all pages; if any are oddball sizes
 *	(not multiple of block sizes), it creates a new scatterlist.
 *	If the do_copy parameter is set to 1, then a scatter list
 *	copy is performed. The pointer to the new scatterlist is
 *	put into the address supplied by the new_sg parameter; if
 *	no new scatterlist is needed, then a NULL is put into
 *	the location at new_sg.
 *
 */
static int sep_oddball_pages(
	struct sep_device *sep,
	struct scatterlist *sg,
	size_t data_size,
	u32 block_size,
	struct scatterlist **new_sg,
	u32 do_copy)
{
	struct scatterlist *sg_temp;
	u32 flag;
	u32 nbr_pages, page_count;

	dev_dbg(&sep->pdev->dev, "sep oddball\n");
	if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
		return 0;

	dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
	flag = 0;
	nbr_pages = 0;
	page_count = 0;
	sg_temp = sg;

	while (sg_temp) {
		nbr_pages += 1;
		sg_temp = sg_next(sg_temp);
	}

	sg_temp = sg;
	while ((sg_temp) && (flag == 0)) {
		page_count += 1;
		if (sg_temp->length % block_size)
			flag = 1;
		else
			sg_temp = sg_next(sg_temp);
	}

	/* Do not process if last (or only) page is oddball */
	if (nbr_pages == page_count)
		flag = 0;

	if (flag) {
		dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
		*new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
		if (*new_sg == NULL) {
			dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
			return -ENOMEM;
		}

		if (do_copy)
			sep_copy_sg(sep, sg, *new_sg, data_size);

		return 1;
	} else {
		return 0;
	}
}
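
/*
 * Illustrative caller pattern (mirroring sep_crypto_block_data()
 * below): the tri-state return selects between the caller's own
 * scatterlist and a freshly built one that must be freed later.
 *
 *	int_error = sep_oddball_pages(sep, req->src, req->nbytes,
 *		blocksize, &new_sg, 1);
 *	if (int_error < 0) {
 *		return -ENOMEM;			allocation failed
 *	} else if (int_error == 1) {
 *		ta_ctx->src_sg = new_sg;	use the rebuilt sg
 *		ta_ctx->src_sg_hold = new_sg;	remember to free it
 *	} else {
 *		ta_ctx->src_sg = req->src;	original pages were fine
 *		ta_ctx->src_sg_hold = NULL;
 *	}
 */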

/**
 *	sep_copy_offset_sg -
 *	@sep: pointer to struct sep_device;
 *	@sg: pointer to struct scatterlist
 *	@offset: offset into scatterlist memory
 *	@dst: place to put data
 *	@len: length of data
 *	@returns: number of bytes copied
 *
 *	This copies data from scatterlist buffer
 *	offset from beginning - it is needed for
 *	handling tail data in hash
 */
static size_t sep_copy_offset_sg(
	struct sep_device *sep,
	struct scatterlist *sg,
	u32 offset,
	void *dst,
	u32 len)
{
	size_t page_start;
	size_t page_end;
	size_t offset_within_page;
	size_t length_within_page;
	size_t length_remaining;
	size_t current_offset;

	/* Find which page is beginning of segment */
	page_start = 0;
	page_end = sg->length;
	while ((sg) && (offset > page_end)) {
		page_start += sg->length;
		sg = sg_next(sg);
		if (sg)
			page_end += sg->length;
	}

	if (sg == NULL)
		return -ENOMEM;

	offset_within_page = offset - page_start;
	if ((sg->length - offset_within_page) >= len) {
		/* All within this page */
		memcpy(dst, sg_virt(sg) + offset_within_page, len);
		return len;
	} else {
		/* Scattered multiple pages */
		current_offset = 0;
		length_remaining = len;
		while ((sg) && (current_offset < len)) {
			length_within_page = sg->length - offset_within_page;
			if (length_within_page >= length_remaining) {
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
					length_remaining);
				length_remaining = 0;
				current_offset = len;
			} else {
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
					length_within_page);
				length_remaining -= length_within_page;
				current_offset += length_within_page;
				offset_within_page = 0;
				sg = sg_next(sg);
			}
		}

		if (sg == NULL)
			return -ENOMEM;
	}
	return len;
}

/**
 *	partial_overlap -
 *	@src_ptr: source pointer
 *	@dst_ptr: destination pointer
 *	@nbytes: number of bytes
 *	@returns: 0 for success; -EINVAL on partial overlap
 *	We cannot have any partial overlap. Total overlap
 *	where src is the same as dst is okay
 */
static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
{
	/* Check for partial overlap */
	if (src_ptr != dst_ptr) {
		if (src_ptr < dst_ptr) {
			if ((src_ptr + nbytes) > dst_ptr)
				return -EINVAL;
		} else {
			if ((dst_ptr + nbytes) > src_ptr)
				return -EINVAL;
		}
	}

	return 0;
}
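
/*
 * Examples (illustrative), with nbytes = 16:
 *
 *	src == dst		-> 0 (total overlap, in-place operation)
 *	dst == src + 8		-> -EINVAL (ranges partially overlap)
 *	dst == src + 16		-> 0 (ranges are disjoint)
 */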

/* Debug - prints only if DEBUG is defined */
static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
{
	unsigned char *cptr;
	struct sep_aes_internal_context *aes_internal;
	struct sep_des_internal_context *des_internal;
	int ct1;

	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		/* print vendor */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for DES\n");
		cptr = (unsigned char *)des_internal->iv_context;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* print walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for DES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		/* print vendor */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for AES\n");
		cptr = (unsigned char *)aes_internal->aes_ctx_iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* print walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for AES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
	}
}

/**
 * RFC2451: Weak key check
 * Returns: 1 (weak), 0 (not weak)
 */
static int sep_weak_key(const u8 *key, unsigned int keylen)
{
	static const u8 parity[] = {
	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
	};

	u32 n, w;

	n  = parity[key[0]]; n <<= 4;
	n |= parity[key[1]]; n <<= 4;
	n |= parity[key[2]]; n <<= 4;
	n |= parity[key[3]]; n <<= 4;
	n |= parity[key[4]]; n <<= 4;
	n |= parity[key[5]]; n <<= 4;
	n |= parity[key[6]]; n <<= 4;
	n |= parity[key[7]];
	w = 0x88888888L;

	/* 1 in 10^10 keys passes this test */
	if (!((n - (w >> 3)) & w)) {
		if (n < 0x41415151) {
			if (n < 0x31312121) {
				if (n < 0x14141515) {
					/* 01 01 01 01 01 01 01 01 */
					if (n == 0x11111111)
						goto weak;
					/* 01 1F 01 1F 01 0E 01 0E */
					if (n == 0x13131212)
						goto weak;
				} else {
					/* 01 E0 01 E0 01 F1 01 F1 */
					if (n == 0x14141515)
						goto weak;
					/* 01 FE 01 FE 01 FE 01 FE */
					if (n == 0x16161616)
						goto weak;
				}
			} else {
				if (n < 0x34342525) {
					/* 1F 01 1F 01 0E 01 0E 01 */
					if (n == 0x31312121)
						goto weak;
					/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
					if (n == 0x33332222)
						goto weak;
				} else {
					/* 1F E0 1F E0 0E F1 0E F1 */
					if (n == 0x34342525)
						goto weak;
					/* 1F FE 1F FE 0E FE 0E FE */
					if (n == 0x36362626)
						goto weak;
				}
			}
		} else {
			if (n < 0x61616161) {
				if (n < 0x44445555) {
					/* E0 01 E0 01 F1 01 F1 01 */
					if (n == 0x41415151)
						goto weak;
					/* E0 1F E0 1F F1 0E F1 0E */
					if (n == 0x43435252)
						goto weak;
				} else {
					/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
					if (n == 0x44445555)
						goto weak;
					/* E0 FE E0 FE F1 FE F1 FE */
					if (n == 0x46465656)
						goto weak;
				}
			} else {
				if (n < 0x64646565) {
					/* FE 01 FE 01 FE 01 FE 01 */
					if (n == 0x61616161)
						goto weak;
					/* FE 1F FE 1F FE 0E FE 0E */
					if (n == 0x63636262)
						goto weak;
				} else {
					/* FE E0 FE E0 FE F1 FE F1 */
					if (n == 0x64646565)
						goto weak;
					/* FE FE FE FE FE FE FE FE */
					if (n == 0x66666666)
						goto weak;
				}
			}
		}
	}
	return 0;
weak:
	return 1;
}
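
/*
 * Explanatory note (not in the original source): parity[] folds each
 * key byte down to one nibble - 0 or 8 for ordinary bytes, and a
 * distinct code (1..6) for the byte values 0x01, 0x0E, 0x1F, 0xE0,
 * 0xF1 and 0xFE that make up the RFC 2451 weak and semi-weak DES
 * keys. Packing one nibble per key byte into n turns each candidate
 * weak key into a unique 32-bit constant (the all-0x01 key becomes
 * 0x11111111, for example), so the comparison tree above is simply a
 * hand-rolled binary search over those constants.
 */
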
/**
 *	sep_sg_nents
 */
static u32 sep_sg_nents(struct scatterlist *sg)
{
	u32 ct1 = 0;

	while (sg) {
		ct1 += 1;
		sg = sg_next(sg);
	}

	return ct1;
}

/**
 *	sep_start_msg -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@returns: offset to place for the next word in the message
 *	Set up pointer in message pool for new message
 */
static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
{
	u32 *word_ptr;

	ta_ctx->msg_len_words = 2;
	ta_ctx->msgptr = ta_ctx->msg;
	memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
	ta_ctx->msgptr += sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	*word_ptr = SEP_START_MSG_TOKEN;
	return sizeof(u32) * 2;
}

/**
 *	sep_end_msg -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@msg_offset: current message offset
 *	End the message: patch the total length in after the start
 *	token and write the (currently unused, always zero) CRC at
 *	the end. The message itself is sent to the SEP later, by
 *	sep_crypto_take_sep()
 */
static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
{
	u32 *word_ptr;

	/* Msg size goes into msg after token */
	ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
	word_ptr = (u32 *)ta_ctx->msgptr;
	word_ptr += 1;
	*word_ptr = ta_ctx->msg_len_words;

	/* CRC (currently 0) goes at end of msg */
	word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
	*word_ptr = 0;
}

/**
 *	sep_start_inbound_msg -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@msg_offset: offset to place for the next word in the message
 *	@returns: 0 for success; error value for failure
 *	Set up pointer in message pool for inbound message
 */
static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
{
	u32 *word_ptr;
	u32 token;
	u32 error = SEP_OK;

	*msg_offset = sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	token = *word_ptr;
	ta_ctx->msg_len_words = *(word_ptr + 1);

	if (token != SEP_START_MSG_TOKEN) {
		error = SEP_INVALID_START;
		goto end_function;
	}

end_function:

	return error;
}

/**
 *	sep_write_msg -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@in_addr: pointer to start of parameter
 *	@size: size of parameter to copy (in bytes)
 *	@max_size: size to move up offset; SEP msg is in word sizes
 *	@msg_offset: pointer to current offset (is updated)
 *	@byte_array: flag to indicate whether endian must be changed
 *	Copies data into the message area from caller
 */
static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{
	u32 *word_ptr;
	void *void_ptr;

	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;
	memcpy(void_ptr, in_addr, size);
	*msg_offset += max_size;

	/* Do we need to manipulate endian? */
	if (byte_array) {
		u32 i;

		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
	}
}

/**
 *	sep_make_header
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@msg_offset: pointer to current offset (is updated)
 *	@op_code: op code to put into message
 *	Puts op code into message and updates offset
 */
static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
			    u32 op_code)
{
	u32 *word_ptr;

	*msg_offset = sep_start_msg(ta_ctx);
	word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
	*word_ptr = op_code;
	*msg_offset += sizeof(u32);
}
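
/*
 * Resulting outbound message layout (illustrative). Offsets used by
 * the helpers are relative to ta_ctx->msgptr, which sep_start_msg()
 * parks two words past the start of the shared message area:
 *
 *	word 0: SEP_START_MSG_TOKEN	(sep_start_msg)
 *	word 1: message length in words	(patched in by sep_end_msg)
 *	word 2: op code			(sep_make_header)
 *	 ...	operation parameters	(sep_write_msg and friends)
 *	last:	CRC word, currently 0	(sep_end_msg)
 */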

/**
 *	sep_read_msg -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@in_addr: pointer to start of parameter
 *	@size: size of parameter to copy (in bytes)
 *	@max_size: size to move up offset; SEP msg is in word sizes
 *	@msg_offset: pointer to current offset (is updated)
 *	@byte_array: flag to indicate whether endian must be changed
 *	Copies data out of the message area to caller
 */
static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{
	u32 *word_ptr;
	void *void_ptr;

	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;

	/* Do we need to manipulate endian? */
	if (byte_array) {
		u32 i;

		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
	}

	memcpy(in_addr, void_ptr, size);
	*msg_offset += max_size;
}

/**
 *	sep_verify_op -
 *	@ta_ctx: pointer to struct this_task_ctx
 *	@op_code: expected op_code
 *	@msg_offset: pointer to current offset (is updated)
 *	@returns: 0 for success; error for failure
 */
static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
			 u32 *msg_offset)
{
	u32 error;
	u32 in_ary[2];

	struct sep_device *sep = ta_ctx->sep_used;

	dev_dbg(&sep->pdev->dev, "dumping return message\n");
	error = sep_start_inbound_msg(ta_ctx, msg_offset);
	if (error) {
		dev_warn(&sep->pdev->dev,
			"sep_start_inbound_msg error\n");
		return error;
	}

	sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
		msg_offset, 0);

	if (in_ary[0] != op_code) {
		dev_warn(&sep->pdev->dev,
			"sep got back wrong opcode\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
			in_ary[0], op_code);
		return SEP_WRONG_OPCODE;
	}

	if (in_ary[1] != SEP_OK) {
		dev_warn(&sep->pdev->dev,
			"sep execution error\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
			in_ary[1], SEP_OK);
		/* return the status word, not the opcode */
		return in_ary[1];
	}

	return 0;
}

/**
 * sep_read_context -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: point to current place in SEP msg; is updated
 * @dst: pointer to place to put the context
 * @len: size of the context structure (differs for crypto/hash)
 * This function reads the context from the msg area
 * There is a special way the vendor needs to have the maximum
 * length calculated so that the msg_offset is updated properly;
 * it skips over some words in the msg area depending on the size
 * of the context
 */
static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	void *dst, u32 len)
{
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);

	sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
}

/**
 * sep_write_context -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: point to current place in SEP msg; is updated
 * @src: pointer to the current context
 * @len: size of the context structure (differs for crypto/hash)
 * This function writes the context to the msg area
 * There is a special way the vendor needs to have the maximum
 * length calculated so that the msg_offset is updated properly;
 * it skips over some words in the msg area depending on the size
 * of the context
 */
static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	void *src, u32 len)
{
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);

	sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
}
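
/*
 * Example of the rounding above (illustrative): a 21-byte context
 * yields max_length = ((21 + 3) / 4) * 4 = 24, so msg_offset always
 * advances by a whole number of 32-bit words, even for context sizes
 * that are not word multiples.
 */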

/**
 * sep_clear_out -
 * @ta_ctx: pointer to struct this_task_ctx
 * Clear out crypto related values in sep device structure
 * to enable device to be used by anyone; either kernel
 * crypto or userspace app via middleware
 */
static void sep_clear_out(struct this_task_ctx *ta_ctx)
{
	if (ta_ctx->src_sg_hold) {
		sep_free_sg_buf(ta_ctx->src_sg_hold);
		ta_ctx->src_sg_hold = NULL;
	}

	if (ta_ctx->dst_sg_hold) {
		sep_free_sg_buf(ta_ctx->dst_sg_hold);
		ta_ctx->dst_sg_hold = NULL;
	}

	ta_ctx->src_sg = NULL;
	ta_ctx->dst_sg = NULL;

	sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);

	if (ta_ctx->i_own_sep) {
		/**
		 * The following unlocks the sep and makes it available
		 * to any other application
		 * First, null out crypto entries in sep before releasing it
		 */
		ta_ctx->sep_used->current_hash_req = NULL;
		ta_ctx->sep_used->current_cypher_req = NULL;
		ta_ctx->sep_used->current_request = 0;
		ta_ctx->sep_used->current_hash_stage = 0;
		ta_ctx->sep_used->ta_ctx = NULL;
		ta_ctx->sep_used->in_kernel = 0;

		ta_ctx->call_status.status = 0;

		/* Remove anything confidential */
		memset(ta_ctx->sep_used->shared_addr, 0,
			SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

		sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);

#ifdef SEP_ENABLE_RUNTIME_PM
		ta_ctx->sep_used->in_use = 0;
		pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
		pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
#endif

		clear_bit(SEP_WORKING_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		ta_ctx->sep_used->pid_doing_transaction = 0;

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"[PID%d] waking up next transaction\n",
			current->pid);

		clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		wake_up(&ta_ctx->sep_used->event_transactions);

		ta_ctx->i_own_sep = 0;
	}
}

/**
 * Release crypto infrastructure from EINPROGRESS and
 * clear sep_dev so that SEP is available to anyone
 */
static void sep_crypto_release(struct sep_system_ctx *sctx,
	struct this_task_ctx *ta_ctx, u32 error)
{
	struct ahash_request *hash_req = ta_ctx->current_hash_req;
	struct ablkcipher_request *cypher_req =
		ta_ctx->current_cypher_req;
	struct sep_device *sep = ta_ctx->sep_used;

	sep_clear_out(ta_ctx);

	/**
	 * This may not yet exist depending when we
	 * chose to bail out. If it does exist, set
	 * it to 1
	 */
	if (ta_ctx->are_we_done_yet != NULL)
		*ta_ctx->are_we_done_yet = 1;

	if (cypher_req != NULL) {
		if ((sctx->key_sent == 1) ||
			((error != 0) && (error != -EINPROGRESS))) {
			if (cypher_req->base.complete == NULL) {
				dev_dbg(&sep->pdev->dev,
					"release is null for cypher!");
			} else {
				cypher_req->base.complete(
					&cypher_req->base, error);
			}
		}
	}

	if (hash_req != NULL) {
		if (hash_req->base.complete == NULL) {
			dev_dbg(&sep->pdev->dev,
				"release is null for hash!");
		} else {
			hash_req->base.complete(
				&hash_req->base, error);
		}
	}
}

/**
 *	This is where we grab the sep itself and tell it to do something.
 *	It will sleep if the sep is currently busy
 *	and it will return 0 if sep is now ours; error value if there
 *	were problems
 */
static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
{
	struct sep_device *sep = ta_ctx->sep_used;
	int result;
	struct sep_msgarea_hdr *my_msg_header;

	my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;

	/* add to status queue */
	ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
		ta_ctx->nbytes, current->pid,
		current->comm, sizeof(current->comm));

	if (!ta_ctx->queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
		return -EINVAL;
	}

	/* get the device; this can sleep */
	result = sep_wait_transaction(sep);
	if (result)
		return result;

	if (sep_dev->power_save_setup == 1)
		pm_runtime_get_sync(&sep_dev->pdev->dev);

	/* Copy in the message */
	memcpy(sep->shared_addr, ta_ctx->msg,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Copy in the dcb information if there is any */
	if (ta_ctx->dcb_region) {
		result = sep_activate_dcb_dmatables_context(sep,
			&ta_ctx->dcb_region, &ta_ctx->dmatables_region,
			ta_ctx->dma_ctx);
		if (result)
			return result;
	}

	/* Mark the device so we know how to finish the job in the tasklet */
	if (ta_ctx->current_hash_req)
		sep->current_hash_req = ta_ctx->current_hash_req;
	else
		sep->current_cypher_req = ta_ctx->current_cypher_req;

	sep->current_request = ta_ctx->current_request;
	sep->current_hash_stage = ta_ctx->current_hash_stage;
	sep->ta_ctx = ta_ctx;
	sep->in_kernel = 1;
	ta_ctx->i_own_sep = 1;

	/* need to set bit first to avoid race condition with interrupt */
	set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);

	result = sep_send_command_handler(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
		current->pid);

	if (!result) {
		dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
			current->pid);
	} else {
		dev_dbg(&sep->pdev->dev, "[PID%d]: cant send command\n",
			current->pid);
		clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			&ta_ctx->call_status.status);
	}

	return result;
}
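
/*
 * Typical lifecycle (illustrative summary of the code below): build a
 * message with sep_make_header()/sep_write_msg()/sep_end_msg(), call
 * sep_crypto_take_sep() to own the device and send the message, wait
 * for the interrupt-driven post op to run, and let sep_crypto_release()
 * hand the device back for the next user.
 */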

/**
 * This function sets things up for a crypto data block process
 * This does all preparation, but does not try to grab the
 * sep
 * @req: pointer to struct ablkcipher_request
 * returns: 0 if all went well, non zero if error
 */
static int sep_crypto_block_data(struct ablkcipher_request *req)
{
	int int_error;
	u32 msg_offset;
	static u32 msg[10];
	void *src_ptr;
	void *dst_ptr;

	static char small_buf[100];
	ssize_t copy_result;
	int result;

	struct scatterlist *new_sg;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	struct sep_des_internal_context *des_internal;
	struct sep_aes_internal_context *aes_internal;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	/* start the walk on scatterlists */
	ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
		req->nbytes);

	int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
			int_error);
		return -ENOMEM;
	}

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"crypto block: src is %lx dst is %lx\n",
		(unsigned long)req->src, (unsigned long)req->dst);

	/* Make sure all pages are an even multiple of the block size */
	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
		return -ENOMEM;
	} else if (int_error == 1) {
		ta_ctx->src_sg = new_sg;
		ta_ctx->src_sg_hold = new_sg;
	} else {
		ta_ctx->src_sg = req->src;
		ta_ctx->src_sg_hold = NULL;
	}

	int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
		return -ENOMEM;
	} else if (int_error == 1) {
		ta_ctx->dst_sg = new_sg;
		ta_ctx->dst_sg_hold = new_sg;
	} else {
		ta_ctx->dst_sg = req->dst;
		ta_ctx->dst_sg_hold = NULL;
	}

	/* set nbytes for queue status */
	ta_ctx->nbytes = req->nbytes;

	/* Key already done; this is for data */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");

	/* check for valid data and proper spacing */
	src_ptr = sg_virt(ta_ctx->src_sg);
	dst_ptr = sg_virt(ta_ctx->dst_sg);

	if (!src_ptr || !dst_ptr ||
		(ta_ctx->current_cypher_req->nbytes %
		crypto_ablkcipher_blocksize(tfm))) {

		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size odd\n");
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size is %x\n",
			crypto_ablkcipher_blocksize(tfm));
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher data size is %x\n",
			ta_ctx->current_cypher_req->nbytes);
		return -EINVAL;
	}

	if (partial_overlap(src_ptr, dst_ptr,
		ta_ctx->current_cypher_req->nbytes)) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"block partial overlap\n");
		return -EINVAL;
	}

	/* Put together the message */
	sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);

	/* If des, and size is 1 block, put directly in msg */
	if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
		(req->nbytes == crypto_ablkcipher_blocksize(tfm))) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"writing out one block des\n");

		copy_result = sg_copy_to_buffer(
			ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
			small_buf, crypto_ablkcipher_blocksize(tfm));

		if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"des block copy failed\n");
			return -ENOMEM;
		}

		/* Put data into message */
		sep_write_msg(ta_ctx, small_buf,
			crypto_ablkcipher_blocksize(tfm),
			crypto_ablkcipher_blocksize(tfm) * 2,
			&msg_offset, 1);

		/* Put size into message */
		sep_write_msg(ta_ctx, &req->nbytes,
			sizeof(u32), sizeof(u32), &msg_offset, 0);
	} else {
		/* Otherwise, fill out dma tables */
		ta_ctx->dcb_input_data.app_in_address = src_ptr;
		ta_ctx->dcb_input_data.data_in_size = req->nbytes;
		ta_ctx->dcb_input_data.app_out_address = dst_ptr;
		ta_ctx->dcb_input_data.block_size =
			crypto_ablkcipher_blocksize(tfm);
		ta_ctx->dcb_input_data.tail_block_size = 0;
		ta_ctx->dcb_input_data.is_applet = 0;
		ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
		ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;

		result = sep_create_dcb_dmatables_context_kernel(
			ta_ctx->sep_used,
			&ta_ctx->dcb_region,
			&ta_ctx->dmatables_region,
			&ta_ctx->dma_ctx,
			&ta_ctx->dcb_input_data,
			1);
		if (result) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"crypto dma table create failed\n");
			return -EINVAL;
		}

		/* Portion of msg is nulled (no data) */
		msg[0] = (u32)0;
		msg[1] = (u32)0;
		msg[2] = (u32)0;
		msg[3] = (u32)0;
		msg[4] = (u32)0;
		sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
			sizeof(u32) * 5, &msg_offset, 0);
	}

	/**
	 * Before we write the message, we need to overwrite the
	 * vendor's IV with the one from our own ablkcipher walk
	 * iv because this is needed for dm-crypt
	 */
	sep_dump_ivs(req, "sending data block to sep\n");
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on DES\n");
		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		memcpy((void *)des_internal->iv_context,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on AES\n");
		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		memcpy((void *)aes_internal->aes_ctx_iv,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
	}

	/* Write context into message */
	if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->des_private_ctx,
			sizeof(struct sep_des_private_context));
	} else {
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->aes_private_ctx,
			sizeof(struct sep_aes_private_context));
	}

	/* conclude message */
	sep_end_msg(ta_ctx, msg_offset);

	/* Parent (caller) is now ready to tell the sep to go ahead */
	return 0;
}

/**
 * This function sets things up for a crypto key submit process
 * This does all preparation, but does not try to grab the
 * sep
 * @req: pointer to struct ablkcipher_request
 * returns: 0 if all went well, non zero if error
 */
static int sep_crypto_send_key(struct ablkcipher_request *req)
{
	int int_error;
	u32 msg_offset;
	static u32 msg[10];

	u32 max_length;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");

	/* start the walk on scatterlists */
	ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep crypto block data size of %x\n", req->nbytes);

	int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
			int_error);
		return -ENOMEM;
	}

	/* check iv */
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {
		if (!ta_ctx->walk.iv) {
			dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
			return -EINVAL;
		}

		memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
	}

	if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {
		if (!ta_ctx->walk.iv) {
			dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
			return -EINVAL;
		}

		memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
	}

	/* put together message to SEP */
	/* Start with op code */
	sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);

	/* now deal with IV */
	if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
		if (ta_ctx->des_opmode == SEP_DES_CBC) {
			sep_write_msg(ta_ctx, ta_ctx->iv,
				SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
				&msg_offset, 1);
		} else {
			/* Skip if ECB */
			msg_offset += 4 * sizeof(u32);
		}
	} else {
		max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
			sizeof(u32)) * sizeof(u32);
		if (ta_ctx->aes_opmode == SEP_AES_CBC) {
			sep_write_msg(ta_ctx, ta_ctx->iv,
				SEP_AES_IV_SIZE_BYTES, max_length,
				&msg_offset, 1);
		} else {
			/* Skip if ECB */
			msg_offset += max_length;
		}
	}

	/* load the key */
	if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
		sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
			sizeof(u32) * 8, sizeof(u32) * 8,
			&msg_offset, 1);

		msg[0] = (u32)sctx->des_nbr_keys;
		msg[1] = (u32)ta_ctx->des_encmode;
		msg[2] = (u32)ta_ctx->des_opmode;

		sep_write_msg(ta_ctx, (void *)msg,
			sizeof(u32) * 3, sizeof(u32) * 3,
			&msg_offset, 0);
	} else {
		sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
			sctx->keylen,
			SEP_AES_MAX_KEY_SIZE_BYTES,
			&msg_offset, 1);

		msg[0] = (u32)sctx->aes_key_size;
		msg[1] = (u32)ta_ctx->aes_encmode;
		msg[2] = (u32)ta_ctx->aes_opmode;
		msg[3] = (u32)0; /* Secret key is not used */
		sep_write_msg(ta_ctx, (void *)msg,
			sizeof(u32) * 4, sizeof(u32) * 4,
			&msg_offset, 0);
	}

	/* conclude message */
	sep_end_msg(ta_ctx, msg_offset);

	/* Parent (caller) is now ready to tell the sep to go ahead */
	return 0;
}

/* This needs to be run as a work queue as it can be put to sleep */
static void sep_crypto_block(void *data)
{
	unsigned long end_time;

	int result;

	struct ablkcipher_request *req;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;
	int are_we_done_yet;

	req = (struct ablkcipher_request *)data;
	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	ta_ctx->are_we_done_yet = &are_we_done_yet;

	pr_debug("sep_crypto_block\n");
	pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
		tfm, sctx, ta_ctx);
	pr_debug("key_sent is %d\n", sctx->key_sent);

	/* do we need to send the key */
	if (sctx->key_sent == 0) {
		are_we_done_yet = 0;
		result = sep_crypto_send_key(req); /* prep to send key */
		if (result != 0) {
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"could not prep key %x\n", result);
			sep_crypto_release(sctx, ta_ctx, result);
			return;
		}

		result = sep_crypto_take_sep(ta_ctx);
		if (result) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep_crypto_take_sep for key send failed\n");
			sep_crypto_release(sctx, ta_ctx, result);
			return;
		}

		/* now we sit and wait up to a fixed time for completion */
		end_time = jiffies + (WAIT_TIME * HZ);
		while ((time_before(jiffies, end_time)) &&
			(are_we_done_yet == 0))
			schedule();

		/* Done waiting; still not done yet? */
		if (are_we_done_yet == 0) {
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"Send key job never got done\n");
			sep_crypto_release(sctx, ta_ctx, -EINVAL);
			return;
		}

		/* Set the key sent variable so this can be skipped later */
		sctx->key_sent = 1;
	}

	/* Key sent (or maybe not if we did not have to), now send block */
	are_we_done_yet = 0;

	result = sep_crypto_block_data(req);

	if (result != 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"could not prep block send %x\n", result);
		sep_crypto_release(sctx, ta_ctx, result);
		return;
	}

	result = sep_crypto_take_sep(ta_ctx);
	if (result) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_crypto_take_sep for block send failed\n");
		sep_crypto_release(sctx, ta_ctx, result);
		return;
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"Send block job never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}

	/* That's it; entire thing done, get out of queue */

	pr_debug("crypto_block leaving\n");
	pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
}

/**
 * Post operation (after interrupt) for crypto block
 */
static u32 crypto_post_op(struct sep_device *sep)
{
	/* HERE */
	u32 u32_error;
	u32 msg_offset;

	ssize_t copy_result;
	static char small_buf[100];

	struct ablkcipher_request *req;
	struct this_task_ctx *ta_ctx;
	struct sep_system_ctx *sctx;
	struct crypto_ablkcipher *tfm;

	struct sep_des_internal_context *des_internal;
	struct sep_aes_internal_context *aes_internal;

	if (!sep->current_cypher_req)
		return -EINVAL;

	/* hold req since we need to submit work after clearing sep */
	req = sep->current_cypher_req;

	ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
	tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
	sctx = crypto_ablkcipher_ctx(tfm);

	pr_debug("crypto_post op\n");
	pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
		sctx->key_sent, tfm, sctx, ta_ctx);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Is this the result of performing init (sending key to SEP)? */
	if (sctx->key_sent == 0) {

		/* Did SEP do it okay */
		u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
			&msg_offset);
		if (u32_error) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"aes init error %x\n", u32_error);
			sep_crypto_release(sctx, ta_ctx, u32_error);
			return u32_error;
		}

		/* Read Context */
		if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
			sep_read_context(ta_ctx, &msg_offset,
			&sctx->des_private_ctx,
			sizeof(struct sep_des_private_context));
		} else {
			sep_read_context(ta_ctx, &msg_offset,
			&sctx->aes_private_ctx,
			sizeof(struct sep_aes_private_context));
		}

		sep_dump_ivs(req, "after sending key to sep\n");

		/* key sent went okay; release sep, and set are_we_done_yet */
		sctx->key_sent = 1;
		sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);

	} else {

		/**
		 * This is the result of a block request
		 */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"crypto_post_op block response\n");

		u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
			&msg_offset);

		if (u32_error) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep block error %x\n", u32_error);
			sep_crypto_release(sctx, ta_ctx, u32_error);
			return -EINVAL;
		}

		if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"post op for DES\n");

			/* special case for 1 block des */
			if (sep->current_cypher_req->nbytes ==
				crypto_ablkcipher_blocksize(tfm)) {

				sep_read_msg(ta_ctx, small_buf,
					crypto_ablkcipher_blocksize(tfm),
					crypto_ablkcipher_blocksize(tfm) * 2,
					&msg_offset, 1);

				dev_dbg(&ta_ctx->sep_used->pdev->dev,
					"reading in block des\n");

				copy_result = sg_copy_from_buffer(
					ta_ctx->dst_sg,
					sep_sg_nents(ta_ctx->dst_sg),
					small_buf,
					crypto_ablkcipher_blocksize(tfm));

				if (copy_result !=
					crypto_ablkcipher_blocksize(tfm)) {

					dev_warn(&ta_ctx->sep_used->pdev->dev,
						"des block copy failed\n");
					sep_crypto_release(sctx, ta_ctx,
						-ENOMEM);
					return -ENOMEM;
				}
			}

			/* Read Context */
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->des_private_ctx,
				sizeof(struct sep_des_private_context));
		} else {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"post op for AES\n");

			/* Skip the MAC Output */
			msg_offset += (sizeof(u32) * 4);

			/* Read Context */
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->aes_private_ctx,
				sizeof(struct sep_aes_private_context));
		}

		/* Copy to correct sg if this block had oddball pages */
		if (ta_ctx->dst_sg_hold)
			sep_copy_sg(ta_ctx->sep_used,
				ta_ctx->dst_sg,
				ta_ctx->current_cypher_req->dst,
				ta_ctx->current_cypher_req->nbytes);

		/**
		 * Copy the ivs back to the walk.iv
		 * This is required for dm_crypt
		 */
		sep_dump_ivs(req, "got data block from sep\n");
		if ((ta_ctx->current_request == DES_CBC) &&
			(ta_ctx->des_opmode == SEP_DES_CBC)) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"returning result iv to walk on DES\n");
			des_internal = (struct sep_des_internal_context *)
				sctx->des_private_ctx.ctx_buf;
			memcpy(ta_ctx->walk.iv,
				(void *)des_internal->iv_context,
				crypto_ablkcipher_ivsize(tfm));
		} else if ((ta_ctx->current_request == AES_CBC) &&
			(ta_ctx->aes_opmode == SEP_AES_CBC)) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"returning result iv to walk on AES\n");
			aes_internal = (struct sep_aes_internal_context *)
				sctx->aes_private_ctx.cbuff;
			memcpy(ta_ctx->walk.iv,
				(void *)aes_internal->aes_ctx_iv,
				crypto_ablkcipher_ivsize(tfm));
		}

		/* finished, release everything */
		sep_crypto_release(sctx, ta_ctx, 0);
	}
	pr_debug("crypto_post_op done\n");
	pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
		sctx->key_sent, tfm, sctx, ta_ctx);

	return 0;
}

static u32 hash_init_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash init post op\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
		&msg_offset);

	if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
			u32_error);
		sep_crypto_release(sctx, ta_ctx, u32_error);
		return u32_error;
	}

	/* Read Context */
	sep_read_context(ta_ctx, &msg_offset,
		&sctx->hash_private_ctx,
		sizeof(struct sep_hash_private_context));

	/* Signal to crypto infrastructure and clear out */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
	sep_crypto_release(sctx, ta_ctx, 0);
	return 0;
}
1743 
hash_update_post_op(struct sep_device * sep)1744 static u32 hash_update_post_op(struct sep_device *sep)
1745 {
1746 	u32 u32_error;
1747 	u32 msg_offset;
1748 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1749 	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1750 	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1751 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
1752 		"hash update post op\n");
1753 
1754 	/* first bring msg from shared area to local area */
1755 	memcpy(ta_ctx->msg, sep->shared_addr,
1756 		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1757 
1758 	u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1759 		&msg_offset);
1760 
1761 	if (u32_error) {
1762 		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1763 			u32_error);
1764 		sep_crypto_release(sctx, ta_ctx, u32_error);
1765 		return u32_error;
1766 	}
1767 
1768 	/* Read Context */
1769 	sep_read_context(ta_ctx, &msg_offset,
1770 		&sctx->hash_private_ctx,
1771 		sizeof(struct sep_hash_private_context));
1772 
1773 	/**
1774 	 * Following is only for finup; if we just completed the
1775 	 * data portion of finup, we now need to kick off the
1776 	 * finish portion of finup.
1777 	 */
1778 
1779 	if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1780 
1781 		/* first reset stage to HASH_FINUP_FINISH */
1782 		ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1783 
1784 		/* now enqueue the finish operation */
1785 		spin_lock_irq(&queue_lock);
1786 		u32_error = crypto_enqueue_request(&sep_queue,
1787 			&ta_ctx->sep_used->current_hash_req->base);
1788 		spin_unlock_irq(&queue_lock);
1789 
1790 		if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1791 			dev_warn(&ta_ctx->sep_used->pdev->dev,
1792 				"spe cypher post op cant queue\n");
1793 			sep_crypto_release(sctx, ta_ctx, u32_error);
1794 			return u32_error;
1795 		}
1796 
1797 		/* schedule the data send */
1798 		u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1799 			sep_dequeuer, (void *)&sep_queue);
1800 
1801 		if (u32_error) {
1802 			dev_warn(&ta_ctx->sep_used->pdev->dev,
1803 				"cant submit work sep_crypto_block\n");
1804 			sep_crypto_release(sctx, ta_ctx, -EINVAL);
1805 			return -EINVAL;
1806 		}
1807 	}
1808 
1809 	/* Signal to crypto infrastructure and clear out */
1810 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1811 	sep_crypto_release(sctx, ta_ctx, 0);
1812 	return 0;
1813 }
1814 
1815 static u32 hash_final_post_op(struct sep_device *sep)
1816 {
1817 	int max_length;
1818 	u32 u32_error;
1819 	u32 msg_offset;
1820 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1821 	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1822 	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1823 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
1824 		"hash final post op\n");
1825 
1826 	/* first bring msg from shared area to local area */
1827 	memcpy(ta_ctx->msg, sep->shared_addr,
1828 		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1829 
1830 	u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1831 		&msg_offset);
1832 
1833 	if (u32_error) {
1834 		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1835 			u32_error);
1836 		sep_crypto_release(sctx, ta_ctx, u32_error);
1837 		return u32_error;
1838 	}
1839 
1840 	/* Grab the result */
1841 	if (ta_ctx->current_hash_req->result == NULL) {
1842 		/* Oops, null buffer; error out here */
1843 		dev_warn(&ta_ctx->sep_used->pdev->dev,
1844 			"hash finish null buffer\n");
1845 		sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1846 		return -ENOMEM;
1847 	}
1848 
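	/* result length in bytes, rounded up to a whole number of u32 words */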
1849 	max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1850 		sizeof(u32)) * sizeof(u32);
1851 
1852 	sep_read_msg(ta_ctx,
1853 		ta_ctx->current_hash_req->result,
1854 		crypto_ahash_digestsize(tfm), max_length,
1855 		&msg_offset, 0);
1856 
1857 	/* Signal to crypto infrastructure and clear out */
1858 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1859 	sep_crypto_release(sctx, ta_ctx, 0);
1860 	return 0;
1861 }
1862 
1863 static u32 hash_digest_post_op(struct sep_device *sep)
1864 {
1865 	int max_length;
1866 	u32 u32_error;
1867 	u32 msg_offset;
1868 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1869 	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1870 	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1871 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
1872 		"hash digest post op\n");
1873 
1874 	/* first bring msg from shared area to local area */
1875 	memcpy(ta_ctx->msg, sep->shared_addr,
1876 		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1877 
1878 	u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1879 		&msg_offset);
1880 
1881 	if (u32_error) {
1882 		dev_warn(&ta_ctx->sep_used->pdev->dev,
1883 			"hash digest finish error %x\n", u32_error);
1884 
1885 		sep_crypto_release(sctx, ta_ctx, u32_error);
1886 		return u32_error;
1887 	}
1888 
1889 	/* Grab the result */
1890 	if (ta_ctx->current_hash_req->result == NULL) {
1891 		/* Oops, null buffer; error out here */
1892 		dev_warn(&ta_ctx->sep_used->pdev->dev,
1893 			"hash digest finish null buffer\n");
1894 		sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1895 		return -ENOMEM;
1896 	}
1897 
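	/* result length in bytes, rounded up to a whole number of u32 words */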
1898 	max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1899 		sizeof(u32)) * sizeof(u32);
1900 
1901 	sep_read_msg(ta_ctx,
1902 		ta_ctx->current_hash_req->result,
1903 		crypto_ahash_digestsize(tfm), max_length,
1904 		&msg_offset, 0);
1905 
1906 	/* Signal to crypto infrastructure and clear out */
1907 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
1908 		"hash digest finish post op done\n");
1909 
1910 	sep_crypto_release(sctx, ta_ctx, 0);
1911 	return 0;
1912 }
1913 
1914 /**
1915  * The sep_finish function is scheduled (via tasklet) by the
1916  * interrupt service routine when the SEP raises an interrupt;
1917  * it is only ever called as a tasklet from the interrupt handler.
1918  */
1919 static void sep_finish(unsigned long data)
1920 {
1921 	struct sep_device *sep_dev;
1922 	int res;
1923 
1924 	res = 0;
1925 
1926 	if (data == 0) {
1927 		pr_debug("sep_finish called with null data\n");
1928 		return;
1929 	}
1930 
1931 	sep_dev = (struct sep_device *)data;
1932 	if (sep_dev == NULL) {
1933 		pr_debug("sep_finish; sep_dev is NULL\n");
1934 		return;
1935 	}
1936 
1937 	if (sep_dev->in_kernel == (u32)0) {
1938 		dev_warn(&sep_dev->pdev->dev,
1939 			"sep_finish; not in kernel operation\n");
1940 		return;
1941 	}
1942 
1943 	/* Did we really do a sep command prior to this? */
1944 	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1945 		&sep_dev->ta_ctx->call_status.status)) {
1946 
1947 		dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1948 			current->pid);
1949 		return;
1950 	}
1951 
1952 	if (sep_dev->send_ct != sep_dev->reply_ct) {
1953 		dev_warn(&sep_dev->pdev->dev,
1954 			"[PID%d] poll; no message came back\n",
1955 			current->pid);
1956 		return;
1957 	}
1958 
1959 	/* Check for error (In case time ran out) */
1960 	if ((res != 0x0) && (res != 0x8)) {
1961 		dev_warn(&sep_dev->pdev->dev,
1962 			"[PID%d] poll; poll error GPR3 is %x\n",
1963 			current->pid, res);
1964 		return;
1965 	}
1966 
1967 	/* What kind of interrupt from sep was this? */
1968 	res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1969 
1970 	dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1971 		current->pid, res);
1972 
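	/*
	 * GPR2 bit 30 signals a SEP printf request and bit 31 a daemon
	 * request; anything else is a reply to a host command.
	 */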
1973 	/* Print request? */
1974 	if ((res >> 30) & 0x1) {
1975 		dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1976 			current->pid);
1977 		dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1978 			current->pid,
1979 			(char *)(sep_dev->shared_addr +
1980 			SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1981 		return;
1982 	}
1983 
1984 	/* Request for daemon (not currently in POR)? */
1985 	if (res >> 31) {
1986 		dev_dbg(&sep_dev->pdev->dev,
1987 			"[PID%d] sep request; ignoring\n",
1988 			current->pid);
1989 		return;
1990 	}
1991 
1992 	/* If we got here, then we have a reply to a sep command */
1993 
1994 	dev_dbg(&sep_dev->pdev->dev,
1995 		"[PID%d] sep reply to command; processing request: %x\n",
1996 		current->pid, sep_dev->current_request);
1997 
1998 	switch (sep_dev->current_request) {
1999 	case AES_CBC:
2000 	case AES_ECB:
2001 	case DES_CBC:
2002 	case DES_ECB:
2003 		res = crypto_post_op(sep_dev);
2004 		break;
2005 	case SHA1:
2006 	case MD5:
2007 	case SHA224:
2008 	case SHA256:
2009 		switch (sep_dev->current_hash_stage) {
2010 		case HASH_INIT:
2011 			res = hash_init_post_op(sep_dev);
2012 			break;
2013 		case HASH_UPDATE:
2014 		case HASH_FINUP_DATA:
2015 			res = hash_update_post_op(sep_dev);
2016 			break;
2017 		case HASH_FINUP_FINISH:
2018 		case HASH_FINISH:
2019 			res = hash_final_post_op(sep_dev);
2020 			break;
2021 		case HASH_DIGEST:
2022 			res = hash_digest_post_op(sep_dev);
2023 			break;
2024 		default:
2025 			pr_debug("sep - invalid stage for hash finish\n");
2026 		}
2027 		break;
2028 	default:
2029 		pr_debug("sep - invalid request for finish\n");
2030 	}
2031 
2032 	if (res)
2033 		pr_debug("sep - finish returned error %x\n", res);
2034 }
2035 
2036 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2037 {
2038 	const char *alg_name = crypto_tfm_alg_name(tfm);
2039 
2040 	pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2041 
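	/* reserve per-request space for our struct this_task_ctx */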
2042 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2043 		sizeof(struct this_task_ctx));
2044 	return 0;
2045 }
2046 
2047 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2048 {
2049 	pr_debug("sep_hash_cra_exit\n");
2050 }
2051 
2052 static void sep_hash_init(void *data)
2053 {
2054 	u32 msg_offset;
2055 	int result;
2056 	struct ahash_request *req;
2057 	struct crypto_ahash *tfm;
2058 	struct this_task_ctx *ta_ctx;
2059 	struct sep_system_ctx *sctx;
2060 	unsigned long end_time;
2061 	int are_we_done_yet;
2062 
2063 	req = (struct ahash_request *)data;
2064 	tfm = crypto_ahash_reqtfm(req);
2065 	sctx = crypto_ahash_ctx(tfm);
2066 	ta_ctx = ahash_request_ctx(req);
2067 	ta_ctx->sep_used = sep_dev;
2068 
2069 	ta_ctx->are_we_done_yet = &are_we_done_yet;
2070 
2071 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
2072 		"sep_hash_init\n");
2073 	ta_ctx->current_hash_stage = HASH_INIT;
2074 	/* opcode and mode */
2075 	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2076 	sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2077 		sizeof(u32), sizeof(u32), &msg_offset, 0);
2078 	sep_end_msg(ta_ctx, msg_offset);
2079 
2080 	are_we_done_yet = 0;
2081 	result = sep_crypto_take_sep(ta_ctx);
2082 	if (result) {
2083 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2084 			"sep_hash_init take sep failed\n");
2085 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2086 	}
2087 
2088 	/* now we sit and wait up to a fixed time for completion */
2089 	end_time = jiffies + (WAIT_TIME * HZ);
2090 	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2091 		schedule();
2092 
2093 	/* Done waiting; still not done yet? */
2094 	if (are_we_done_yet == 0) {
2095 		dev_dbg(&ta_ctx->sep_used->pdev->dev,
2096 			"hash init never got done\n");
2097 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2098 		return;
2099 	}
2100 
2101 }
2102 
2103 static void sep_hash_update(void *data)
2104 {
2105 	int int_error;
2106 	u32 msg_offset;
2107 	u32 len;
2108 	struct sep_hash_internal_context *int_ctx;
2109 	u32 block_size;
2110 	u32 head_len;
2111 	u32 tail_len;
2112 	int are_we_done_yet;
2113 
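	/*
	 * The buffers below are static to keep this frame small; this
	 * assumes hash requests are serialized through the SEP workqueue.
	 */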
2114 	static u32 msg[10];
2115 	static char small_buf[100];
2116 	void *src_ptr;
2117 	struct scatterlist *new_sg;
2118 	ssize_t copy_result;
2119 	struct ahash_request *req;
2120 	struct crypto_ahash *tfm;
2121 	struct this_task_ctx *ta_ctx;
2122 	struct sep_system_ctx *sctx;
2123 	unsigned long end_time;
2124 
2125 	req = (struct ahash_request *)data;
2126 	tfm = crypto_ahash_reqtfm(req);
2127 	sctx = crypto_ahash_ctx(tfm);
2128 	ta_ctx = ahash_request_ctx(req);
2129 	ta_ctx->sep_used = sep_dev;
2130 
2131 	ta_ctx->are_we_done_yet = &are_we_done_yet;
2132 
2133 	/* length for queue status */
2134 	ta_ctx->nbytes = req->nbytes;
2135 
2136 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
2137 		"sep_hash_update\n");
2138 	ta_ctx->current_hash_stage = HASH_UPDATE;
2139 	len = req->nbytes;
2140 
2141 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2142 	tail_len = req->nbytes % block_size;
2143 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2144 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2145 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2146 
2147 	/* Compute header/tail sizes */
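	/*
	 * head_len completes the partial block carried over from the
	 * previous update; tail_len is the remainder after whole blocks.
	 * Both are sent inline in the message rather than through DMA.
	 */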
2148 	int_ctx = (struct sep_hash_internal_context *)&sctx->
2149 		hash_private_ctx.internal_context;
2150 	head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2151 	tail_len = (req->nbytes - head_len) % block_size;
2152 
2153 	/* Make sure all pages are an even block */
2154 	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2155 		req->nbytes,
2156 		block_size, &new_sg, 1);
2157 
2158 	if (int_error < 0) {
2159 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2160 			"oddball pages error in crash update\n");
2161 		sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2162 		return;
2163 	} else if (int_error == 1) {
2164 		ta_ctx->src_sg = new_sg;
2165 		ta_ctx->src_sg_hold = new_sg;
2166 	} else {
2167 		ta_ctx->src_sg = req->src;
2168 		ta_ctx->src_sg_hold = NULL;
2169 	}
2170 
2171 	/* check for null data before dereferencing the scatterlist */
2172 	if ((!req->nbytes) || (!ta_ctx->src_sg))
2173 		src_ptr = NULL;
2174 	else
2175 		src_ptr = sg_virt(ta_ctx->src_sg);
2177 
2178 	ta_ctx->dcb_input_data.app_in_address = src_ptr;
2179 	ta_ctx->dcb_input_data.data_in_size =
2180 		req->nbytes - (head_len + tail_len);
2181 	ta_ctx->dcb_input_data.app_out_address = NULL;
2182 	ta_ctx->dcb_input_data.block_size = block_size;
2183 	ta_ctx->dcb_input_data.tail_block_size = 0;
2184 	ta_ctx->dcb_input_data.is_applet = 0;
2185 	ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2186 	ta_ctx->dcb_input_data.dst_sg = NULL;
2187 
2188 	int_error = sep_create_dcb_dmatables_context_kernel(
2189 		ta_ctx->sep_used,
2190 		&ta_ctx->dcb_region,
2191 		&ta_ctx->dmatables_region,
2192 		&ta_ctx->dma_ctx,
2193 		&ta_ctx->dcb_input_data,
2194 		1);
2195 	if (int_error) {
2196 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2197 			"hash update dma table create failed\n");
2198 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2199 		return;
2200 	}
2201 
2202 	/* Construct message to SEP */
2203 	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2204 
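	/* three parameter words, cleared to zero (apparently unused here) */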
2205 	msg[0] = (u32)0;
2206 	msg[1] = (u32)0;
2207 	msg[2] = (u32)0;
2208 
2209 	sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2210 		&msg_offset, 0);
2211 
2212 	/* Handle remainders */
2213 
2214 	/* Head */
2215 	sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2216 		sizeof(u32), &msg_offset, 0);
2217 
2218 	if (head_len) {
2219 		copy_result = sg_copy_to_buffer(
2220 			ta_ctx->src_sg,
2221 			sep_sg_nents(ta_ctx->src_sg),
2222 			small_buf, head_len);
2223 
2224 		if (copy_result != head_len) {
2225 			dev_warn(&ta_ctx->sep_used->pdev->dev,
2226 				"sg head copy failure in hash block\n");
2227 			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2228 			return;
2229 		}
2230 
2231 		sep_write_msg(ta_ctx, small_buf, head_len,
2232 			sizeof(u32) * 32, &msg_offset, 1);
2233 	} else {
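		/* no head bytes; skip the fixed 32-word head slot */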
2234 		msg_offset += sizeof(u32) * 32;
2235 	}
2236 
2237 	/* Tail */
2238 	sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2239 		sizeof(u32), &msg_offset, 0);
2240 
2241 	if (tail_len) {
2242 		copy_result = sep_copy_offset_sg(
2243 			ta_ctx->sep_used,
2244 			ta_ctx->src_sg,
2245 			req->nbytes - tail_len,
2246 			small_buf, tail_len);
2247 
2248 		if (copy_result != tail_len) {
2249 			dev_warn(&ta_ctx->sep_used->pdev->dev,
2250 				"sg tail copy failure in hash block\n");
2251 			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2252 			return;
2253 		}
2254 
2255 		sep_write_msg(ta_ctx, small_buf, tail_len,
2256 			sizeof(u32) * 32, &msg_offset, 1);
2257 	} else {
2258 		msg_offset += sizeof(u32) * 32;
2259 	}
2260 
2261 	/* Context */
2262 	sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2263 		sizeof(struct sep_hash_private_context));
2264 
2265 	sep_end_msg(ta_ctx, msg_offset);
2266 	are_we_done_yet = 0;
2267 	int_error = sep_crypto_take_sep(ta_ctx);
2268 	if (int_error) {
2269 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2270 			"sep_hash_update take sep failed\n");
2271 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2272 	}
2273 
2274 	/* now we sit and wait up to a fixed time for completion */
2275 	end_time = jiffies + (WAIT_TIME * HZ);
2276 	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2277 		schedule();
2278 
2279 	/* Done waiting; still not done yet? */
2280 	if (are_we_done_yet == 0) {
2281 		dev_dbg(&ta_ctx->sep_used->pdev->dev,
2282 			"hash update never got done\n");
2283 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2284 		return;
2285 	}
2286 
2287 }
2288 
2289 static void sep_hash_final(void *data)
2290 {
2291 	u32 msg_offset;
2292 	struct ahash_request *req;
2293 	struct crypto_ahash *tfm;
2294 	struct this_task_ctx *ta_ctx;
2295 	struct sep_system_ctx *sctx;
2296 	int result;
2297 	unsigned long end_time;
2298 	int are_we_done_yet;
2299 
2300 	req = (struct ahash_request *)data;
2301 	tfm = crypto_ahash_reqtfm(req);
2302 	sctx = crypto_ahash_ctx(tfm);
2303 	ta_ctx = ahash_request_ctx(req);
2304 	ta_ctx->sep_used = sep_dev;
2305 
2306 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
2307 		"sep_hash_final\n");
2308 	ta_ctx->current_hash_stage = HASH_FINISH;
2309 
2310 	ta_ctx->are_we_done_yet = &are_we_done_yet;
2311 
2312 	/* opcode and mode */
2313 	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2314 
2315 	/* Context */
2316 	sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2317 		sizeof(struct sep_hash_private_context));
2318 
2319 	sep_end_msg(ta_ctx, msg_offset);
2320 	are_we_done_yet = 0;
2321 	result = sep_crypto_take_sep(ta_ctx);
2322 	if (result) {
2323 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2324 			"sep_hash_final take sep failed\n");
2325 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2326 	}
2327 
2328 	/* now we sit and wait up to a fixed time for completion */
2329 	end_time = jiffies + (WAIT_TIME * HZ);
2330 	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2331 		schedule();
2332 
2333 	/* Done waiting; still not done yet? */
2334 	if (are_we_done_yet == 0) {
2335 		dev_dbg(&ta_ctx->sep_used->pdev->dev,
2336 			"hash final job never got done\n");
2337 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2338 		return;
2339 	}
2340 
2341 }
2342 
2343 static void sep_hash_digest(void *data)
2344 {
2345 	int int_error;
2346 	u32 msg_offset;
2347 	u32 block_size;
2348 	u32 msg[10];
2349 	size_t copy_result;
2350 	int result;
2351 	int are_we_done_yet;
2352 	u32 tail_len;
2353 	static char small_buf[100];
2354 	struct scatterlist *new_sg;
2355 	void *src_ptr;
2356 
2357 	struct ahash_request *req;
2358 	struct crypto_ahash *tfm;
2359 	struct this_task_ctx *ta_ctx;
2360 	struct sep_system_ctx *sctx;
2361 	unsigned long end_time;
2362 
2363 	req = (struct ahash_request *)data;
2364 	tfm = crypto_ahash_reqtfm(req);
2365 	sctx = crypto_ahash_ctx(tfm);
2366 	ta_ctx = ahash_request_ctx(req);
2367 	ta_ctx->sep_used = sep_dev;
2368 
2369 	dev_dbg(&ta_ctx->sep_used->pdev->dev,
2370 		"sep_hash_digest\n");
2371 	ta_ctx->current_hash_stage = HASH_DIGEST;
2372 
2373 	ta_ctx->are_we_done_yet = &are_we_done_yet;
2374 
2375 	/* length for queue status */
2376 	ta_ctx->nbytes = req->nbytes;
2377 
2378 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2379 	tail_len = req->nbytes % block_size;
2380 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2381 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2382 	dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2383 
2384 	/* Make sure all pages are an even block */
2385 	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2386 		req->nbytes,
2387 		block_size, &new_sg, 1);
2388 
2389 	if (int_error < 0) {
2390 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2391 			"oddball pages error in crash update\n");
2392 		sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2393 		return;
2394 	} else if (int_error == 1) {
2395 		ta_ctx->src_sg = new_sg;
2396 		ta_ctx->src_sg_hold = new_sg;
2397 	} else {
2398 		ta_ctx->src_sg = req->src;
2399 		ta_ctx->src_sg_hold = NULL;
2400 	}
2401 
2402 	/* check for null data before dereferencing the scatterlist */
2403 	if ((!req->nbytes) || (!ta_ctx->src_sg))
2404 		src_ptr = NULL;
2405 	else
2406 		src_ptr = sg_virt(ta_ctx->src_sg);
2408 
2409 	ta_ctx->dcb_input_data.app_in_address = src_ptr;
2410 	ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2411 	ta_ctx->dcb_input_data.app_out_address = NULL;
2412 	ta_ctx->dcb_input_data.block_size = block_size;
2413 	ta_ctx->dcb_input_data.tail_block_size = 0;
2414 	ta_ctx->dcb_input_data.is_applet = 0;
2415 	ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2416 	ta_ctx->dcb_input_data.dst_sg = NULL;
2417 
2418 	int_error = sep_create_dcb_dmatables_context_kernel(
2419 		ta_ctx->sep_used,
2420 		&ta_ctx->dcb_region,
2421 		&ta_ctx->dmatables_region,
2422 		&ta_ctx->dma_ctx,
2423 		&ta_ctx->dcb_input_data,
2424 		1);
2425 	if (int_error) {
2426 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2427 			"hash update dma table create failed\n");
2428 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2429 		return;
2430 	}
2431 
2432 	/* Construct message to SEP */
2433 	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2434 	sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2435 		sizeof(u32), sizeof(u32), &msg_offset, 0);
2436 
2437 	msg[0] = (u32)0;
2438 	msg[1] = (u32)0;
2439 	msg[2] = (u32)0;
2440 
2441 	sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2442 		&msg_offset, 0);
2443 
2444 	/* Tail */
2445 	sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2446 		sizeof(u32), &msg_offset, 0);
2447 
2448 	if (tail_len) {
2449 		copy_result = sep_copy_offset_sg(
2450 			ta_ctx->sep_used,
2451 			ta_ctx->src_sg,
2452 			req->nbytes - tail_len,
2453 			small_buf, tail_len);
2454 
2455 		if (copy_result != tail_len) {
2456 			dev_warn(&ta_ctx->sep_used->pdev->dev,
2457 				"sg tail copy failure in hash block\n");
2458 			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2459 			return;
2460 		}
2461 
2462 		sep_write_msg(ta_ctx, small_buf, tail_len,
2463 			sizeof(u32) * 32, &msg_offset, 1);
2464 	} else {
2465 		msg_offset += sizeof(u32) * 32;
2466 	}
2467 
2468 	sep_end_msg(ta_ctx, msg_offset);
2469 
2470 	are_we_done_yet = 0;
2471 	result = sep_crypto_take_sep(ta_ctx);
2472 	if (result) {
2473 		dev_warn(&ta_ctx->sep_used->pdev->dev,
2474 			"sep_hash_digest take sep failed\n");
2475 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2476 	}
2477 
2478 	/* now we sit and wait up to a fixed time for completion */
2479 	end_time = jiffies + (WAIT_TIME * HZ);
2480 	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2481 		schedule();
2482 
2483 	/* Done waiting; still not done yet? */
2484 	if (are_we_done_yet == 0) {
2485 		dev_dbg(&ta_ctx->sep_used->pdev->dev,
2486 			"hash digest job never got done\n");
2487 		sep_crypto_release(sctx, ta_ctx, -EINVAL);
2488 		return;
2489 	}
2490 
2491 }
2492 
2493 /**
2494  * This is what is called by each of the APIs provided
2495  * in the kernel crypto descriptors. It runs in process
2496  * context using the kernel workqueues, and can therefore
2497  * be put to sleep.
2498  */
2499 static void sep_dequeuer(void *data)
2500 {
2501 	struct crypto_queue *this_queue;
2502 	struct crypto_async_request *async_req;
2503 	struct crypto_async_request *backlog;
2504 	struct ablkcipher_request *cypher_req;
2505 	struct ahash_request *hash_req;
2506 	struct sep_system_ctx *sctx;
2507 	struct crypto_ahash *hash_tfm;
2508 	struct this_task_ctx *ta_ctx;
2509 
2510 
2511 	this_queue = (struct crypto_queue *)data;
2512 
2513 	spin_lock_irq(&queue_lock);
2514 	backlog = crypto_get_backlog(this_queue);
2515 	async_req = crypto_dequeue_request(this_queue);
2516 	spin_unlock_irq(&queue_lock);
2517 
2518 	if (!async_req) {
2519 		pr_debug("sep crypto queue is empty\n");
2520 		return;
2521 	}
2522 
2523 	if (backlog) {
2524 		pr_debug("sep crypto backlog set\n");
2525 		if (backlog->complete)
2526 			backlog->complete(backlog, -EINPROGRESS);
2527 		backlog = NULL;
2528 	}
2529 
2530 	if (!async_req->tfm) {
2531 		pr_debug("sep crypto queue null tfm\n");
2532 		return;
2533 	}
2534 
2535 	if (!async_req->tfm->__crt_alg) {
2536 		pr_debug("sep crypto queue null __crt_alg\n");
2537 		return;
2538 	}
2539 
2540 	if (!async_req->tfm->__crt_alg->cra_type) {
2541 		pr_debug("sep crypto queue null cra_type\n");
2542 		return;
2543 	}
2544 
2545 	/* we have stuff in the queue */
2546 	if (async_req->tfm->__crt_alg->cra_type !=
2547 		&crypto_ahash_type) {
2548 		/* This is for a cypher */
2549 		pr_debug("sep crypto queue doing cipher\n");
2550 		cypher_req = container_of(async_req,
2551 			struct ablkcipher_request,
2552 			base);
2553 		if (!cypher_req) {
2554 			pr_debug("sep crypto queue null cypher_req\n");
2555 			return;
2556 		}
2557 
2558 		sep_crypto_block((void *)cypher_req);
2559 		return;
2560 	} else {
2561 		/* This is a hash */
2562 		pr_debug("sep crypto queue doing hash\n");
2563 		/*
2564 		 * This is a bit more complex than a cipher; we
2565 		 * need to figure out which type of operation it is.
2566 		 */
2567 		hash_req = ahash_request_cast(async_req);
2568 		if (!hash_req) {
2569 			pr_debug("sep crypto queue null hash_req\n");
2570 			return;
2571 		}
2572 
2573 		hash_tfm = crypto_ahash_reqtfm(hash_req);
2574 		if (!hash_tfm) {
2575 			pr_debug("sep crypto queue null hash_tfm\n");
2576 			return;
2577 		}
2578 
2579 
2580 		sctx = crypto_ahash_ctx(hash_tfm);
2581 		if (!sctx) {
2582 			pr_debug("sep crypto queue null sctx\n");
2583 			return;
2584 		}
2585 
2586 		ta_ctx = ahash_request_ctx(hash_req);
2587 
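		/*
		 * Dispatch on the hash stage; finup is split into an update
		 * pass (HASH_FINUP_DATA) followed by a final pass
		 * (HASH_FINUP_FINISH).
		 */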
2588 		if (ta_ctx->current_hash_stage == HASH_INIT) {
2589 			pr_debug("sep crypto queue hash init\n");
2590 			sep_hash_init((void *)hash_req);
2591 			return;
2592 		} else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2593 			pr_debug("sep crypto queue hash update\n");
2594 			sep_hash_update((void *)hash_req);
2595 			return;
2596 		} else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2597 			pr_debug("sep crypto queue hash final\n");
2598 			sep_hash_final((void *)hash_req);
2599 			return;
2600 		} else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2601 			pr_debug("sep crypto queue hash digest\n");
2602 			sep_hash_digest((void *)hash_req);
2603 			return;
2604 		} else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2605 			pr_debug("sep crypto queue hash digest\n");
2606 			sep_hash_update((void *)hash_req);
2607 			return;
2608 		} else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2609 			pr_debug("sep crypto queue hash digest\n");
2610 			sep_hash_final((void *)hash_req);
2611 			return;
2612 		} else {
2613 			pr_debug("sep crypto queue hash oops nothing\n");
2614 			return;
2615 		}
2616 	}
2617 }
2618 
2619 static int sep_sha1_init(struct ahash_request *req)
2620 {
2621 	int error;
2622 	int error1;
2623 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2624 
2625 	pr_debug("sep - doing sha1 init\n");
2626 
2627 	/* Clear out task context */
2628 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2629 
2630 	ta_ctx->sep_used = sep_dev;
2631 	ta_ctx->current_request = SHA1;
2632 	ta_ctx->current_hash_req = req;
2633 	ta_ctx->current_cypher_req = NULL;
2634 	ta_ctx->hash_opmode = SEP_HASH_SHA1;
2635 	ta_ctx->current_hash_stage = HASH_INIT;
2636 
2637 	/* lock necessary so that only one entity touches the queues */
2638 	spin_lock_irq(&queue_lock);
2639 	error = crypto_enqueue_request(&sep_queue, &req->base);
2640 
2641 	if ((error != 0) && (error != -EINPROGRESS))
2642 		pr_debug(" sep - crypto enqueue failed: %x\n",
2643 			error);
2644 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2645 		sep_dequeuer, (void *)&sep_queue);
2646 	if (error1)
2647 		pr_debug(" sep - workqueue submit failed: %x\n",
2648 			error1);
2649 	spin_unlock_irq(&queue_lock);
2650 	/* We return result of crypto enqueue */
2651 	return error;
2652 }
2653 
2654 static int sep_sha1_update(struct ahash_request *req)
2655 {
2656 	int error;
2657 	int error1;
2658 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2659 
2660 	pr_debug("sep - doing sha1 update\n");
2661 
2662 	ta_ctx->sep_used = sep_dev;
2663 	ta_ctx->current_request = SHA1;
2664 	ta_ctx->current_hash_req = req;
2665 	ta_ctx->current_cypher_req = NULL;
2666 	ta_ctx->hash_opmode = SEP_HASH_SHA1;
2667 	ta_ctx->current_hash_stage = HASH_UPDATE;
2668 
2669 	/* lock necessary so that only one entity touches the queues */
2670 	spin_lock_irq(&queue_lock);
2671 	error = crypto_enqueue_request(&sep_queue, &req->base);
2672 
2673 	if ((error != 0) && (error != -EINPROGRESS))
2674 		pr_debug(" sep - crypto enqueue failed: %x\n",
2675 			error);
2676 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2677 		sep_dequeuer, (void *)&sep_queue);
2678 	if (error1)
2679 		pr_debug(" sep - workqueue submit failed: %x\n",
2680 			error1);
2681 	spin_unlock_irq(&queue_lock);
2682 	/* We return result of crypto enqueue */
2683 	return error;
2684 }
2685 
2686 static int sep_sha1_final(struct ahash_request *req)
2687 {
2688 	int error;
2689 	int error1;
2690 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2691 	pr_debug("sep - doing sha1 final\n");
2692 
2693 	ta_ctx->sep_used = sep_dev;
2694 	ta_ctx->current_request = SHA1;
2695 	ta_ctx->current_hash_req = req;
2696 	ta_ctx->current_cypher_req = NULL;
2697 	ta_ctx->hash_opmode = SEP_HASH_SHA1;
2698 	ta_ctx->current_hash_stage = HASH_FINISH;
2699 
2700 	/* lock necessary so that only one entity touches the queues */
2701 	spin_lock_irq(&queue_lock);
2702 	error = crypto_enqueue_request(&sep_queue, &req->base);
2703 
2704 	if ((error != 0) && (error != -EINPROGRESS))
2705 		pr_debug(" sep - crypto enqueue failed: %x\n",
2706 			error);
2707 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2708 		sep_dequeuer, (void *)&sep_queue);
2709 	if (error1)
2710 		pr_debug(" sep - workqueue submit failed: %x\n",
2711 			error1);
2712 	spin_unlock_irq(&queue_lock);
2713 	/* We return result of crypto enqueue */
2714 	return error;
2715 }
2716 
2717 static int sep_sha1_digest(struct ahash_request *req)
2718 {
2719 	int error;
2720 	int error1;
2721 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2722 	pr_debug("sep - doing sha1 digest\n");
2723 
2724 	/* Clear out task context */
2725 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2726 
2727 	ta_ctx->sep_used = sep_dev;
2728 	ta_ctx->current_request = SHA1;
2729 	ta_ctx->current_hash_req = req;
2730 	ta_ctx->current_cypher_req = NULL;
2731 	ta_ctx->hash_opmode = SEP_HASH_SHA1;
2732 	ta_ctx->current_hash_stage = HASH_DIGEST;
2733 
2734 	/* lock necessary so that only one entity touches the queues */
2735 	spin_lock_irq(&queue_lock);
2736 	error = crypto_enqueue_request(&sep_queue, &req->base);
2737 
2738 	if ((error != 0) && (error != -EINPROGRESS))
2739 		pr_debug(" sep - crypto enqueue failed: %x\n",
2740 			error);
2741 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2742 		sep_dequeuer, (void *)&sep_queue);
2743 	if (error1)
2744 		pr_debug(" sep - workqueue submit failed: %x\n",
2745 			error1);
2746 	spin_unlock_irq(&queue_lock);
2747 	/* We return result of crypto enqueue */
2748 	return error;
2749 }
2750 
2751 static int sep_sha1_finup(struct ahash_request *req)
2752 {
2753 	int error;
2754 	int error1;
2755 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2756 	pr_debug("sep - doing sha1 finup\n");
2757 
2758 	ta_ctx->sep_used = sep_dev;
2759 	ta_ctx->current_request = SHA1;
2760 	ta_ctx->current_hash_req = req;
2761 	ta_ctx->current_cypher_req = NULL;
2762 	ta_ctx->hash_opmode = SEP_HASH_SHA1;
2763 	ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2764 
2765 	/* lock necessary so that only one entity touches the queues */
2766 	spin_lock_irq(&queue_lock);
2767 	error = crypto_enqueue_request(&sep_queue, &req->base);
2768 
2769 	if ((error != 0) && (error != -EINPROGRESS))
2770 		pr_debug(" sep - crypto enqueue failed: %x\n",
2771 			error);
2772 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2773 		sep_dequeuer, (void *)&sep_queue);
2774 	if (error1)
2775 		pr_debug(" sep - workqueue submit failed: %x\n",
2776 			error1);
2777 	spin_unlock_irq(&queue_lock);
2778 	/* We return result of crypto enqueue */
2779 	return error;
2780 }
2781 
2782 static int sep_md5_init(struct ahash_request *req)
2783 {
2784 	int error;
2785 	int error1;
2786 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2787 	pr_debug("sep - doing md5 init\n");
2788 
2789 	/* Clear out task context */
2790 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2791 
2792 	ta_ctx->sep_used = sep_dev;
2793 	ta_ctx->current_request = MD5;
2794 	ta_ctx->current_hash_req = req;
2795 	ta_ctx->current_cypher_req = NULL;
2796 	ta_ctx->hash_opmode = SEP_HASH_MD5;
2797 	ta_ctx->current_hash_stage = HASH_INIT;
2798 
2799 	/* lock necessary so that only one entity touches the queues */
2800 	spin_lock_irq(&queue_lock);
2801 	error = crypto_enqueue_request(&sep_queue, &req->base);
2802 
2803 	if ((error != 0) && (error != -EINPROGRESS))
2804 		pr_debug(" sep - crypto enqueue failed: %x\n",
2805 			error);
2806 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2807 		sep_dequeuer, (void *)&sep_queue);
2808 	if (error1)
2809 		pr_debug(" sep - workqueue submit failed: %x\n",
2810 			error1);
2811 	spin_unlock_irq(&queue_lock);
2812 	/* We return result of crypto enqueue */
2813 	return error;
2814 }
2815 
2816 static int sep_md5_update(struct ahash_request *req)
2817 {
2818 	int error;
2819 	int error1;
2820 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2821 	pr_debug("sep - doing md5 update\n");
2822 
2823 	ta_ctx->sep_used = sep_dev;
2824 	ta_ctx->current_request = MD5;
2825 	ta_ctx->current_hash_req = req;
2826 	ta_ctx->current_cypher_req = NULL;
2827 	ta_ctx->hash_opmode = SEP_HASH_MD5;
2828 	ta_ctx->current_hash_stage = HASH_UPDATE;
2829 
2830 	/* lock necessary so that only one entity touches the queues */
2831 	spin_lock_irq(&queue_lock);
2832 	error = crypto_enqueue_request(&sep_queue, &req->base);
2833 
2834 	if ((error != 0) && (error != -EINPROGRESS))
2835 		pr_debug(" sep - crypto enqueue failed: %x\n",
2836 			error);
2837 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2838 		sep_dequeuer, (void *)&sep_queue);
2839 	if (error1)
2840 		pr_debug(" sep - workqueue submit failed: %x\n",
2841 			error1);
2842 	spin_unlock_irq(&queue_lock);
2843 	/* We return result of crypto enqueue */
2844 	return error;
2845 }
2846 
2847 static int sep_md5_final(struct ahash_request *req)
2848 {
2849 	int error;
2850 	int error1;
2851 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2852 	pr_debug("sep - doing md5 final\n");
2853 
2854 	ta_ctx->sep_used = sep_dev;
2855 	ta_ctx->current_request = MD5;
2856 	ta_ctx->current_hash_req = req;
2857 	ta_ctx->current_cypher_req = NULL;
2858 	ta_ctx->hash_opmode = SEP_HASH_MD5;
2859 	ta_ctx->current_hash_stage = HASH_FINISH;
2860 
2861 	/* lock necessary so that only one entity touches the queues */
2862 	spin_lock_irq(&queue_lock);
2863 	error = crypto_enqueue_request(&sep_queue, &req->base);
2864 
2865 	if ((error != 0) && (error != -EINPROGRESS))
2866 		pr_debug(" sep - crypto enqueue failed: %x\n",
2867 			error);
2868 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2869 		sep_dequeuer, (void *)&sep_queue);
2870 	if (error1)
2871 		pr_debug(" sep - workqueue submit failed: %x\n",
2872 			error1);
2873 	spin_unlock_irq(&queue_lock);
2874 	/* We return result of crypto enqueue */
2875 	return error;
2876 }
2877 
2878 static int sep_md5_digest(struct ahash_request *req)
2879 {
2880 	int error;
2881 	int error1;
2882 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2883 
2884 	pr_debug("sep - doing md5 digest\n");
2885 
2886 	/* Clear out task context */
2887 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2888 
2889 	ta_ctx->sep_used = sep_dev;
2890 	ta_ctx->current_request = MD5;
2891 	ta_ctx->current_hash_req = req;
2892 	ta_ctx->current_cypher_req = NULL;
2893 	ta_ctx->hash_opmode = SEP_HASH_MD5;
2894 	ta_ctx->current_hash_stage = HASH_DIGEST;
2895 
2896 	/* lock necessary so that only one entity touches the queues */
2897 	spin_lock_irq(&queue_lock);
2898 	error = crypto_enqueue_request(&sep_queue, &req->base);
2899 
2900 	if ((error != 0) && (error != -EINPROGRESS))
2901 		pr_debug(" sep - crypto enqueue failed: %x\n",
2902 			error);
2903 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2904 		sep_dequeuer, (void *)&sep_queue);
2905 	if (error1)
2906 		pr_debug(" sep - workqueue submit failed: %x\n",
2907 			error1);
2908 	spin_unlock_irq(&queue_lock);
2909 	/* We return result of crypto enqueue */
2910 	return error;
2911 }
2912 
2913 static int sep_md5_finup(struct ahash_request *req)
2914 {
2915 	int error;
2916 	int error1;
2917 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2918 
2919 	pr_debug("sep - doing md5 finup\n");
2920 
2921 	ta_ctx->sep_used = sep_dev;
2922 	ta_ctx->current_request = MD5;
2923 	ta_ctx->current_hash_req = req;
2924 	ta_ctx->current_cypher_req = NULL;
2925 	ta_ctx->hash_opmode = SEP_HASH_MD5;
2926 	ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2927 
2928 	/* lock necessary so that only one entity touches the queues */
2929 	spin_lock_irq(&queue_lock);
2930 	error = crypto_enqueue_request(&sep_queue, &req->base);
2931 
2932 	if ((error != 0) && (error != -EINPROGRESS))
2933 		pr_debug(" sep - crypto enqueue failed: %x\n",
2934 			error);
2935 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2936 		sep_dequeuer, (void *)&sep_queue);
2937 	if (error1)
2938 		pr_debug(" sep - workqueue submit failed: %x\n",
2939 			error1);
2940 	spin_unlock_irq(&queue_lock);
2941 	/* We return result of crypto enqueue */
2942 	return error;
2943 }
2944 
2945 static int sep_sha224_init(struct ahash_request *req)
2946 {
2947 	int error;
2948 	int error1;
2949 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2950 	pr_debug("sep - doing sha224 init\n");
2951 
2952 	/* Clear out task context */
2953 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2954 
2955 	ta_ctx->sep_used = sep_dev;
2956 	ta_ctx->current_request = SHA224;
2957 	ta_ctx->current_hash_req = req;
2958 	ta_ctx->current_cypher_req = NULL;
2959 	ta_ctx->hash_opmode = SEP_HASH_SHA224;
2960 	ta_ctx->current_hash_stage = HASH_INIT;
2961 
2962 	/* lock necessary so that only one entity touches the queues */
2963 	spin_lock_irq(&queue_lock);
2964 	error = crypto_enqueue_request(&sep_queue, &req->base);
2965 
2966 	if ((error != 0) && (error != -EINPROGRESS))
2967 		pr_debug(" sep - crypto enqueue failed: %x\n",
2968 			error);
2969 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2970 		sep_dequeuer, (void *)&sep_queue);
2971 	if (error1)
2972 		pr_debug(" sep - workqueue submit failed: %x\n",
2973 			error1);
2974 	spin_unlock_irq(&queue_lock);
2975 	/* We return result of crypto enqueue */
2976 	return error;
2977 }
2978 
2979 static int sep_sha224_update(struct ahash_request *req)
2980 {
2981 	int error;
2982 	int error1;
2983 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2984 	pr_debug("sep - doing sha224 update\n");
2985 
2986 	ta_ctx->sep_used = sep_dev;
2987 	ta_ctx->current_request = SHA224;
2988 	ta_ctx->current_hash_req = req;
2989 	ta_ctx->current_cypher_req = NULL;
2990 	ta_ctx->hash_opmode = SEP_HASH_SHA224;
2991 	ta_ctx->current_hash_stage = HASH_UPDATE;
2992 
2993 	/* lock necessary so that only one entity touches the queues */
2994 	spin_lock_irq(&queue_lock);
2995 	error = crypto_enqueue_request(&sep_queue, &req->base);
2996 
2997 	if ((error != 0) && (error != -EINPROGRESS))
2998 		pr_debug(" sep - crypto enqueue failed: %x\n",
2999 			error);
3000 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3001 		sep_dequeuer, (void *)&sep_queue);
3002 	if (error1)
3003 		pr_debug(" sep - workqueue submit failed: %x\n",
3004 			error1);
3005 	spin_unlock_irq(&queue_lock);
3006 	/* We return result of crypto enqueue */
3007 	return error;
3008 }
3009 
3010 static int sep_sha224_final(struct ahash_request *req)
3011 {
3012 	int error;
3013 	int error1;
3014 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3015 	pr_debug("sep - doing sha224 final\n");
3016 
3017 	ta_ctx->sep_used = sep_dev;
3018 	ta_ctx->current_request = SHA224;
3019 	ta_ctx->current_hash_req = req;
3020 	ta_ctx->current_cypher_req = NULL;
3021 	ta_ctx->hash_opmode = SEP_HASH_SHA224;
3022 	ta_ctx->current_hash_stage = HASH_FINISH;
3023 
3024 	/* lock necessary so that only one entity touches the queues */
3025 	spin_lock_irq(&queue_lock);
3026 	error = crypto_enqueue_request(&sep_queue, &req->base);
3027 
3028 	if ((error != 0) && (error != -EINPROGRESS))
3029 		pr_debug(" sep - crypto enqueue failed: %x\n",
3030 			error);
3031 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3032 		sep_dequeuer, (void *)&sep_queue);
3033 	if (error1)
3034 		pr_debug(" sep - workqueue submit failed: %x\n",
3035 			error1);
3036 	spin_unlock_irq(&queue_lock);
3037 	/* We return result of crypto enqueue */
3038 	return error;
3039 }
3040 
3041 static int sep_sha224_digest(struct ahash_request *req)
3042 {
3043 	int error;
3044 	int error1;
3045 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3046 
3047 	pr_debug("sep - doing sha224 digest\n");
3048 
3049 	/* Clear out task context */
3050 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3051 
3052 	ta_ctx->sep_used = sep_dev;
3053 	ta_ctx->current_request = SHA224;
3054 	ta_ctx->current_hash_req = req;
3055 	ta_ctx->current_cypher_req = NULL;
3056 	ta_ctx->hash_opmode = SEP_HASH_SHA224;
3057 	ta_ctx->current_hash_stage = HASH_DIGEST;
3058 
3059 	/* lock necessary so that only one entity touches the queues */
3060 	spin_lock_irq(&queue_lock);
3061 	error = crypto_enqueue_request(&sep_queue, &req->base);
3062 
3063 	if ((error != 0) && (error != -EINPROGRESS))
3064 		pr_debug(" sep - crypto enqueue failed: %x\n",
3065 			error);
3066 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3067 		sep_dequeuer, (void *)&sep_queue);
3068 	if (error1)
3069 		pr_debug(" sep - workqueue submit failed: %x\n",
3070 			error1);
3071 	spin_unlock_irq(&queue_lock);
3072 	/* We return result of crypto enqueue */
3073 	return error;
3074 }
3075 
3076 static int sep_sha224_finup(struct ahash_request *req)
3077 {
3078 	int error;
3079 	int error1;
3080 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3081 
3082 	pr_debug("sep - doing sha224 finup\n");
3083 
3084 	ta_ctx->sep_used = sep_dev;
3085 	ta_ctx->current_request = SHA224;
3086 	ta_ctx->current_hash_req = req;
3087 	ta_ctx->current_cypher_req = NULL;
3088 	ta_ctx->hash_opmode = SEP_HASH_SHA224;
3089 	ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3090 
3091 	/* lock necessary so that only one entity touches the queues */
3092 	spin_lock_irq(&queue_lock);
3093 	error = crypto_enqueue_request(&sep_queue, &req->base);
3094 
3095 	if ((error != 0) && (error != -EINPROGRESS))
3096 		pr_debug(" sep - crypto enqueue failed: %x\n",
3097 			error);
3098 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3099 		sep_dequeuer, (void *)&sep_queue);
3100 	if (error1)
3101 		pr_debug(" sep - workqueue submit failed: %x\n",
3102 			error1);
3103 	spin_unlock_irq(&queue_lock);
3104 	/* We return result of crypto enqueue */
3105 	return error;
3106 }
3107 
3108 static int sep_sha256_init(struct ahash_request *req)
3109 {
3110 	int error;
3111 	int error1;
3112 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3113 	pr_debug("sep - doing sha256 init\n");
3114 
3115 	/* Clear out task context */
3116 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3117 
3118 	ta_ctx->sep_used = sep_dev;
3119 	ta_ctx->current_request = SHA256;
3120 	ta_ctx->current_hash_req = req;
3121 	ta_ctx->current_cypher_req = NULL;
3122 	ta_ctx->hash_opmode = SEP_HASH_SHA256;
3123 	ta_ctx->current_hash_stage = HASH_INIT;
3124 
3125 	/* lock necessary so that only one entity touches the queues */
3126 	spin_lock_irq(&queue_lock);
3127 	error = crypto_enqueue_request(&sep_queue, &req->base);
3128 
3129 	if ((error != 0) && (error != -EINPROGRESS))
3130 		pr_debug(" sep - crypto enqueue failed: %x\n",
3131 			error);
3132 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3133 		sep_dequeuer, (void *)&sep_queue);
3134 	if (error1)
3135 		pr_debug(" sep - workqueue submit failed: %x\n",
3136 			error1);
3137 	spin_unlock_irq(&queue_lock);
3138 	/* We return result of crypto enqueue */
3139 	return error;
3140 }
3141 
3142 static int sep_sha256_update(struct ahash_request *req)
3143 {
3144 	int error;
3145 	int error1;
3146 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3147 	pr_debug("sep - doing sha256 update\n");
3148 
3149 	ta_ctx->sep_used = sep_dev;
3150 	ta_ctx->current_request = SHA256;
3151 	ta_ctx->current_hash_req = req;
3152 	ta_ctx->current_cypher_req = NULL;
3153 	ta_ctx->hash_opmode = SEP_HASH_SHA256;
3154 	ta_ctx->current_hash_stage = HASH_UPDATE;
3155 
3156 	/* lock necessary so that only one entity touches the queues */
3157 	spin_lock_irq(&queue_lock);
3158 	error = crypto_enqueue_request(&sep_queue, &req->base);
3159 
3160 	if ((error != 0) && (error != -EINPROGRESS))
3161 		pr_debug(" sep - crypto enqueue failed: %x\n",
3162 			error);
3163 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3164 		sep_dequeuer, (void *)&sep_queue);
3165 	if (error1)
3166 		pr_debug(" sep - workqueue submit failed: %x\n",
3167 			error1);
3168 	spin_unlock_irq(&queue_lock);
3169 	/* We return result of crypto enqueue */
3170 	return error;
3171 }
3172 
3173 static int sep_sha256_final(struct ahash_request *req)
3174 {
3175 	int error;
3176 	int error1;
3177 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3178 	pr_debug("sep - doing sha256 final\n");
3179 
3180 	ta_ctx->sep_used = sep_dev;
3181 	ta_ctx->current_request = SHA256;
3182 	ta_ctx->current_hash_req = req;
3183 	ta_ctx->current_cypher_req = NULL;
3184 	ta_ctx->hash_opmode = SEP_HASH_SHA256;
3185 	ta_ctx->current_hash_stage = HASH_FINISH;
3186 
3187 	/* lock necessary so that only one entity touches the queues */
3188 	spin_lock_irq(&queue_lock);
3189 	error = crypto_enqueue_request(&sep_queue, &req->base);
3190 
3191 	if ((error != 0) && (error != -EINPROGRESS))
3192 		pr_debug(" sep - crypto enqueue failed: %x\n",
3193 			error);
3194 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3195 		sep_dequeuer, (void *)&sep_queue);
3196 	if (error1)
3197 		pr_debug(" sep - workqueue submit failed: %x\n",
3198 			error1);
3199 	spin_unlock_irq(&queue_lock);
3200 	/* We return result of crypto enqueue */
3201 	return error;
3202 }
3203 
3204 static int sep_sha256_digest(struct ahash_request *req)
3205 {
3206 	int error;
3207 	int error1;
3208 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3209 
3210 	pr_debug("sep - doing sha256 digest\n");
3211 
3212 	/* Clear out task context */
3213 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3214 
3215 	ta_ctx->sep_used = sep_dev;
3216 	ta_ctx->current_request = SHA256;
3217 	ta_ctx->current_hash_req = req;
3218 	ta_ctx->current_cypher_req = NULL;
3219 	ta_ctx->hash_opmode = SEP_HASH_SHA256;
3220 	ta_ctx->current_hash_stage = HASH_DIGEST;
3221 
3222 	/* lock necessary so that only one entity touches the queues */
3223 	spin_lock_irq(&queue_lock);
3224 	error = crypto_enqueue_request(&sep_queue, &req->base);
3225 
3226 	if ((error != 0) && (error != -EINPROGRESS))
3227 		pr_debug(" sep - crypto enqueue failed: %x\n",
3228 			error);
3229 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3230 		sep_dequeuer, (void *)&sep_queue);
3231 	if (error1)
3232 		pr_debug(" sep - workqueue submit failed: %x\n",
3233 			error1);
3234 	spin_unlock_irq(&queue_lock);
3235 	/* We return result of crypto enqueue */
3236 	return error;
3237 }
3238 
3239 static int sep_sha256_finup(struct ahash_request *req)
3240 {
3241 	int error;
3242 	int error1;
3243 	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3244 
3245 	pr_debug("sep - doing sha256 finup\n");
3246 
3247 	ta_ctx->sep_used = sep_dev;
3248 	ta_ctx->current_request = SHA256;
3249 	ta_ctx->current_hash_req = req;
3250 	ta_ctx->current_cypher_req = NULL;
3251 	ta_ctx->hash_opmode = SEP_HASH_SHA256;
3252 	ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3253 
3254 	/* lock necessary so that only one entity touches the queues */
3255 	spin_lock_irq(&queue_lock);
3256 	error = crypto_enqueue_request(&sep_queue, &req->base);
3257 
3258 	if ((error != 0) && (error != -EINPROGRESS))
3259 		pr_debug(" sep - crypto enqueue failed: %x\n",
3260 			error);
3261 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3262 		sep_dequeuer, (void *)&sep_queue);
3263 	if (error1)
3264 		pr_debug(" sep - workqueue submit failed: %x\n",
3265 			error1);
3266 	spin_unlock_irq(&queue_lock);
3267 	/* We return result of crypto enqueue */
3268 	return error;
3269 }
3270 
3271 static int sep_crypto_init(struct crypto_tfm *tfm)
3272 {
3273 	const char *alg_name = crypto_tfm_alg_name(tfm);
3274 
3275 	if (alg_name == NULL)
3276 		pr_debug("sep_crypto_init alg is NULL\n");
3277 	else
3278 		pr_debug("sep_crypto_init alg is %s\n", alg_name);
3279 
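	/* reserve per-request space for our struct this_task_ctx */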
3280 	tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3281 	return 0;
3282 }
3283 
3284 static void sep_crypto_exit(struct crypto_tfm *tfm)
3285 {
3286 	pr_debug("sep_crypto_exit\n");
3287 }
3288 
3289 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3290 	unsigned int keylen)
3291 {
3292 	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3293 
3294 	pr_debug("sep aes setkey\n");
3295 
3296 	pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3297 	switch (keylen) {
3298 	case SEP_AES_KEY_128_SIZE:
3299 		sctx->aes_key_size = AES_128;
3300 		break;
3301 	case SEP_AES_KEY_192_SIZE:
3302 		sctx->aes_key_size = AES_192;
3303 		break;
3304 	case SEP_AES_KEY_256_SIZE:
3305 		sctx->aes_key_size = AES_256;
3306 		break;
3307 	case SEP_AES_KEY_512_SIZE:
3308 		sctx->aes_key_size = AES_512;
3309 		break;
3310 	default:
3311 		pr_debug("invalid sep aes key size %x\n",
3312 			keylen);
3313 		return -EINVAL;
3314 	}
3315 
3316 	memset(&sctx->key.aes, 0, sizeof(u32) *
3317 		SEP_AES_MAX_KEY_SIZE_WORDS);
3318 	memcpy(&sctx->key.aes, key, keylen);
3319 	sctx->keylen = keylen;
3320 	/* Indicate to encrypt/decrypt function to send key to SEP */
3321 	sctx->key_sent = 0;
3322 
3323 	return 0;
3324 }
3325 
3326 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3327 {
3328 	int error;
3329 	int error1;
3330 	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3331 
3332 	pr_debug("sep - doing aes ecb encrypt\n");
3333 
3334 	/* Clear out task context */
3335 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3336 
3337 	ta_ctx->sep_used = sep_dev;
3338 	ta_ctx->current_request = AES_ECB;
3339 	ta_ctx->current_hash_req = NULL;
3340 	ta_ctx->current_cypher_req = req;
3341 	ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3342 	ta_ctx->aes_opmode = SEP_AES_ECB;
3343 	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3344 	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3345 
3346 	/* lock necessary so that only one entity touches the queues */
3347 	spin_lock_irq(&queue_lock);
3348 	error = crypto_enqueue_request(&sep_queue, &req->base);
3349 
3350 	if ((error != 0) && (error != -EINPROGRESS))
3351 		pr_debug(" sep - crypto enqueue failed: %x\n",
3352 			error);
3353 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3354 		sep_dequeuer, (void *)&sep_queue);
3355 	if (error1)
3356 		pr_debug(" sep - workqueue submit failed: %x\n",
3357 			error1);
3358 	spin_unlock_irq(&queue_lock);
3359 	/* We return result of crypto enqueue */
3360 	return error;
3361 }
3362 
3363 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3364 {
3365 	int error;
3366 	int error1;
3367 	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3368 
3369 	pr_debug("sep - doing aes ecb decrypt\n");
3370 
3371 	/* Clear out task context */
3372 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3373 
3374 	ta_ctx->sep_used = sep_dev;
3375 	ta_ctx->current_request = AES_ECB;
3376 	ta_ctx->current_hash_req = NULL;
3377 	ta_ctx->current_cypher_req = req;
3378 	ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3379 	ta_ctx->aes_opmode = SEP_AES_ECB;
3380 	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3381 	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3382 
3383 	/* lock necessary so that only one entity touches the queues */
3384 	spin_lock_irq(&queue_lock);
3385 	error = crypto_enqueue_request(&sep_queue, &req->base);
3386 
3387 	if ((error != 0) && (error != -EINPROGRESS))
3388 		pr_debug(" sep - crypto enqueue failed: %x\n",
3389 			error);
3390 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3391 		sep_dequeuer, (void *)&sep_queue);
3392 	if (error1)
3393 		pr_debug(" sep - workqueue submit failed: %x\n",
3394 			error1);
3395 	spin_unlock_irq(&queue_lock);
3396 	/* We return result of crypto enqueue */
3397 	return error;
3398 }
3399 
3400 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3401 {
3402 	int error;
3403 	int error1;
3404 	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3405 	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3406 		crypto_ablkcipher_reqtfm(req));
3407 
3408 	pr_debug("sep - doing aes cbc encrypt\n");
3409 
3410 	/* Clear out task context */
3411 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3412 
3413 	pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3414 		crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3415 
3416 	ta_ctx->sep_used = sep_dev;
3417 	ta_ctx->current_request = AES_CBC;
3418 	ta_ctx->current_hash_req = NULL;
3419 	ta_ctx->current_cypher_req = req;
3420 	ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3421 	ta_ctx->aes_opmode = SEP_AES_CBC;
3422 	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3423 	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3424 
3425 	/* lock necessary so that only one entity touches the queues */
3426 	spin_lock_irq(&queue_lock);
3427 	error = crypto_enqueue_request(&sep_queue, &req->base);
3428 
3429 	if ((error != 0) && (error != -EINPROGRESS))
3430 		pr_debug(" sep - crypto enqueue failed: %x\n",
3431 			error);
3432 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3433 		sep_dequeuer, (void *)&sep_queue);
3434 	if (error1)
3435 		pr_debug(" sep - workqueue submit failed: %x\n",
3436 			error1);
3437 	spin_unlock_irq(&queue_lock);
3438 	/* We return result of crypto enqueue */
3439 	return error;
3440 }
3441 
3442 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3443 {
3444 	int error;
3445 	int error1;
3446 	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3447 	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3448 		crypto_ablkcipher_reqtfm(req));
3449 
3450 	pr_debug("sep - doing aes cbc decrypt\n");
3451 
3452 	pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3453 		crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3454 
3455 	/* Clear out task context */
3456 	memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3457 
3458 	ta_ctx->sep_used = sep_dev;
3459 	ta_ctx->current_request = AES_CBC;
3460 	ta_ctx->current_hash_req = NULL;
3461 	ta_ctx->current_cypher_req = req;
3462 	ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3463 	ta_ctx->aes_opmode = SEP_AES_CBC;
3464 	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3465 	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3466 
3467 	/* lock necessary so that only one entity touches the queues */
3468 	spin_lock_irq(&queue_lock);
3469 	error = crypto_enqueue_request(&sep_queue, &req->base);
3470 
3471 	if ((error != 0) && (error != -EINPROGRESS))
3472 		pr_debug(" sep - crypto enqueue failed: %x\n",
3473 			error);
3474 	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3475 		sep_dequeuer, (void *)&sep_queue);
3476 	if (error1)
3477 		pr_debug(" sep - workqueue submit failed: %x\n",
3478 			error1);
3479 	spin_unlock_irq(&queue_lock);
3480 	/* We return result of crypto enqueue */
3481 	return error;
3482 }
3483 
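/**
 *	sep_des_setkey
 *	@tfm: transform whose context will cache the key
 *	@key: raw key material, 1, 2 or 3 DES_KEY_SIZE blocks long
 *	@keylen: length of @key in bytes
 *	Validates the key length, rejects weak keys when the transform
 *	asks for that check, and caches the key in the per-tfm context;
 *	key_sent is cleared so the next request delivers the key to SEP.
 */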
static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
	unsigned int keylen)
{
	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
	u32 *flags = &ctfm->crt_flags;

	pr_debug("sep des setkey\n");

	switch (keylen) {
	case DES_KEY_SIZE:
		sctx->des_nbr_keys = DES_KEY_1;
		break;
	case DES_KEY_SIZE * 2:
		sctx->des_nbr_keys = DES_KEY_2;
		break;
	case DES_KEY_SIZE * 3:
		sctx->des_nbr_keys = DES_KEY_3;
		break;
	default:
		pr_debug("invalid key size %u\n",
			keylen);
		return -EINVAL;
	}

	if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
		(sep_weak_key(key, keylen))) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		pr_debug("weak key\n");
		return -EINVAL;
	}

	memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
	memcpy(&sctx->key.des.key1, key, keylen);
	sctx->keylen = keylen;
	/* Indicate to encrypt/decrypt function to send key to SEP */
	sctx->key_sent = 0;

	return 0;
}

static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des ecb encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_ENCRYPT;
	ta_ctx->des_opmode = SEP_DES_ECB;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug("sep - crypto enqueue failed: %d\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug("sep - workqueue submit failed: %d\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return the result of the crypto enqueue */
	return error;
}

static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des ecb decrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_DECRYPT;
	ta_ctx->des_opmode = SEP_DES_ECB;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug("sep - crypto enqueue failed: %d\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug("sep - workqueue submit failed: %d\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return the result of the crypto enqueue */
	return error;
}

static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des cbc encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_ENCRYPT;
	ta_ctx->des_opmode = SEP_DES_CBC;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug("sep - crypto enqueue failed: %d\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug("sep - workqueue submit failed: %d\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return the result of the crypto enqueue */
	return error;
}

static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des cbc decrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_DECRYPT;
	ta_ctx->des_opmode = SEP_DES_CBC;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug("sep - crypto enqueue failed: %d\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug("sep - workqueue submit failed: %d\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return the result of the crypto enqueue */
	return error;
}

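/*
 * Asynchronous hash algorithms exported to the crypto API.  The
 * digests themselves are computed by the SEP firmware through the
 * sep_*_init/update/final entry points registered here.
 */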
static struct ahash_alg hash_algs[] = {
{
	.init		= sep_sha1_init,
	.update		= sep_sha1_update,
	.final		= sep_sha1_final,
	.digest		= sep_sha1_digest,
	.finup		= sep_sha1_finup,
	.halg		= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_md5_init,
	.update		= sep_md5_update,
	.final		= sep_md5_final,
	.digest		= sep_md5_digest,
	.finup		= sep_md5_finup,
	.halg		= {
		.digestsize	= MD5_DIGEST_SIZE,
		.base		= {
			.cra_name		= "md5",
			.cra_driver_name	= "md5-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_sha224_init,
	.update		= sep_sha224_update,
	.final		= sep_sha224_final,
	.digest		= sep_sha224_digest,
	.finup		= sep_sha224_finup,
	.halg		= {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha224",
			.cra_driver_name	= "sha224-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_sha256_init,
	.update		= sep_sha256_update,
	.final		= sep_sha256_final,
	.digest		= sep_sha256_digest,
	.finup		= sep_sha256_finup,
	.halg		= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
}
};

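/*
 * Illustrative only, compiled out: a minimal sketch of how a kernel
 * consumer could drive the "sha1" ahash registered above.  The sample
 * function names and buffers are hypothetical, not part of this
 * driver; it assumes linux/scatterlist.h and linux/completion.h are
 * already pulled in by the includes above.
 */
#if 0
static void sep_sample_sha1_done(struct crypto_async_request *areq, int err)
{
	/* Ignore the notification a backlogged request gets on dequeue */
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int sep_sample_sha1(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				sep_sample_sha1_done, &done);
	ahash_request_set_crypt(req, &sg, out, len);

	/* This driver is CRYPTO_ALG_ASYNC, so expect -EINPROGRESS here */
	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&done);
		err = 0;	/* final status reporting elided for brevity */
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif
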
static struct crypto_alg crypto_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sep_aes_setkey,
		.encrypt	= sep_aes_ecb_encrypt,
		.decrypt	= sep_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sep_aes_setkey,
		.encrypt	= sep_aes_cbc_encrypt,
		.decrypt	= sep_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ecb(des)",
	.cra_driver_name	= "ecb-des-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_ecb_encrypt,
		.decrypt	= sep_des_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(des)",
	.cra_driver_name	= "cbc-des-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_cbc_encrypt,
		.decrypt	= sep_des_cbc_decrypt,
	}
},
{
	.cra_name		= "ecb(des3_ede)",
	.cra_driver_name	= "ecb-des3_ede-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_ecb_encrypt,
		.decrypt	= sep_des_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(des3_ede)",
	.cra_driver_name	= "cbc-des3_ede-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_cbc_encrypt,
		.decrypt	= sep_des_cbc_decrypt,
	}
}
};

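/*
 * Illustrative only, compiled out: a minimal sketch of driving the
 * "cbc(aes)" ablkcipher registered above, encrypting a buffer in
 * place.  Function and parameter names here are hypothetical, not
 * part of this driver.
 */
#if 0
static void sep_sample_cbc_done(struct crypto_async_request *areq, int err)
{
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int sep_sample_cbc_aes(u8 *buf, unsigned int len,
	const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				sep_sample_cbc_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&done);
		err = 0;	/* final status reporting elided for brevity */
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}
#endif

/**
 *	sep_crypto_setup
 *	Registers the SEP-backed hash and cipher algorithms above and
 *	creates the finish tasklet and dequeue workqueue they rely on.
 *	Returns 0 on success; on failure, everything already registered
 *	or allocated is torn down again.
 */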
int sep_crypto_setup(void)
{
	int err, i, j, k;

	tasklet_init(&sep_dev->finish_tasklet, sep_finish,
		(unsigned long)sep_dev);

	crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);

	sep_dev->workqueue = create_singlethread_workqueue(
		"sep_crypto_workqueue");
	if (!sep_dev->workqueue) {
		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
		err = -ENOMEM;
		goto err_workqueue;
	}

	spin_lock_init(&queue_lock);

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		err = crypto_register_ahash(&hash_algs[i]);
		if (err)
			goto err_hash_algs;
	}

	for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
		err = crypto_register_alg(&crypto_algs[j]);
		if (err)
			goto err_crypto_algs;
	}

	return 0;

err_crypto_algs:
	/* All hash algs registered by now; unwind ciphers, then hashes */
	for (k = 0; k < j; k++)
		crypto_unregister_alg(&crypto_algs[k]);
err_hash_algs:
	for (k = 0; k < i; k++)
		crypto_unregister_ahash(&hash_algs[k]);
	destroy_workqueue(sep_dev->workqueue);
err_workqueue:
	tasklet_kill(&sep_dev->finish_tasklet);
	return err;
}

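/**
 *	sep_crypto_takedown
 *	Unregisters every algorithm registered by sep_crypto_setup() and
 *	releases the workqueue and tasklet that serviced them.
 */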
void sep_crypto_takedown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i]);
	for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
		crypto_unregister_alg(&crypto_algs[i]);

	/* No new requests can arrive; drain the dequeuer, then the tasklet */
	destroy_workqueue(sep_dev->workqueue);
	tasklet_kill(&sep_dev->finish_tasklet);
}

#endif /* defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) */