• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Stream co-processor driver for the ETRAX FS
4  *
5  *    Copyright (C) 2003-2007  Axis Communications AB
6  */
7 
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/string.h>
13 #include <linux/fs.h>
14 #include <linux/mm.h>
15 #include <linux/spinlock.h>
16 #include <linux/stddef.h>
17 
18 #include <linux/uaccess.h>
19 #include <asm/io.h>
20 #include <linux/atomic.h>
21 
22 #include <linux/list.h>
23 #include <linux/interrupt.h>
24 
25 #include <asm/signal.h>
26 #include <asm/irq.h>
27 
28 #include <dma.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/reg_map.h>
31 #include <hwregs/reg_rdwr.h>
32 #include <hwregs/intr_vect_defs.h>
33 
34 #include <hwregs/strcop.h>
35 #include <hwregs/strcop_defs.h>
36 #include <cryptocop.h>
37 
/* DMA channel selection for the stream co-processor.  On ETRAX FS the
 * co-processor is served by DMA channels 9 (in) and 8 (out); on other
 * chips by channels 3 (in) and 2 (out). */
#ifdef CONFIG_ETRAXFS
#define IN_DMA 9
#define OUT_DMA 8
#define IN_DMA_INST regi_dma9
#define OUT_DMA_INST regi_dma8
#define DMA_IRQ DMA9_INTR_VECT
#else
#define IN_DMA 3
#define OUT_DMA 2
#define IN_DMA_INST regi_dma3
#define OUT_DMA_INST regi_dma2
#define DMA_IRQ DMA3_INTR_VECT
#endif

/* Slack added to descriptor allocations so the embedded hardware
 * descriptor can be aligned to a 32-byte boundary (see alloc_cdesc()). */
#define DESCR_ALLOC_PAD  (31)
53 
/* Driver wrapper around one hardware DMA data descriptor.  The actual
 * dma_descr_data used by the hardware lives inside dma_descr_buf;
 * dma_descr points at its 32-byte-aligned position (the DESCR_ALLOC_PAD
 * slack makes alignment always possible). */
struct cryptocop_dma_desc {
	char *free_buf; /* If non-null will be kfreed in free_cdesc() */
	dma_descr_data *dma_descr; /* Aligned pointer into dma_descr_buf. */

	/* Raw storage for the hardware descriptor plus alignment slack. */
	unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];

	unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
	struct cryptocop_dma_desc *next; /* Next wrapper in the driver's chain. */
};
63 
64 
/* Driver-internal representation of one operation: the DMA contexts and
 * descriptor chains handed to the hardware.  The struct is positioned in
 * its allocation so that ctx_out lands on a 32-byte boundary; alloc_ptr
 * keeps the original kmalloc pointer for kfree
 * (see cryptocop_setup_dma_list()). */
struct cryptocop_int_operation{
	void                        *alloc_ptr;  /* Original kmalloc pointer. */
	cryptocop_session_id        sid;         /* Owning session. */

	dma_descr_context           ctx_out;     /* DMA context, out channel. */
	dma_descr_context           ctx_in;      /* DMA context, in channel. */

	/* DMA descriptors allocated by driver. */
	struct cryptocop_dma_desc   *cdesc_out;
	struct cryptocop_dma_desc   *cdesc_in;

	/* Strcop config to use. */
	cryptocop_3des_mode         tdes_mode;
	cryptocop_csum_type         csum_mode;

	/* DMA descrs provided by consumer. */
	dma_descr_data              *ddesc_out;
	dma_descr_data              *ddesc_in;
};
84 
85 
/* Per-transform bookkeeping used while an operation's DMA lists are
 * built.  One context per unit kind (cipher, digest, checksum) is set up
 * in cryptocop_setup_dma_list(); tcfg == NULL means the transform is not
 * used in the current operation. */
struct cryptocop_tfrm_ctx {
	cryptocop_tfrm_id tid;
	unsigned int blocklength;  /* Transform block length in bytes. */

	/* Input byte index where this transform starts; used as the
	 * discard length for mem2mem (see append_input_descriptors()). */
	unsigned int start_ix;

	struct cryptocop_tfrm_cfg *tcfg;  /* NULL when unused in this operation. */
	struct cryptocop_transform_ctx *tctx;

	unsigned char previous_src;
	unsigned char current_src;

	/* Values to use in metadata out. */
	unsigned char hash_conf;
	unsigned char hash_mode;
	unsigned char ciph_conf;
	unsigned char cbcmode;
	unsigned char decrypt;

	unsigned int requires_padding:1;      /* Unit needs end-of-data padding (hash/csum). */
	unsigned int strict_block_length:1;   /* Input must be a multiple of blocklength (ciphers). */
	unsigned int active:1;
	unsigned int done:1;
	size_t consumed;  /* Bytes fed into the unit so far. */
	size_t produced;  /* Bytes of output the unit will deliver. */

	/* Pad (input) descriptors to put in the DMA out list when the transform
	 * output is put on the DMA in list. */
	struct cryptocop_dma_desc *pad_descs;

	struct cryptocop_tfrm_ctx *prev_src;
	struct cryptocop_tfrm_ctx *curr_src;

	/* Mapping to HW. */
	unsigned char unit_no;  /* src_* unit selector for this transform. */
};
122 
123 
/* Node in a list of session IDs.  NOTE(review): presumably the per-open
 * private data tracking sessions created through a file handle — confirm
 * in cryptocop_open()/cryptocop_release() (not visible in this chunk). */
struct cryptocop_private{
	cryptocop_session_id sid;
	struct cryptocop_private *next;
};
128 
129 /* Session list. */
130 
/* Per-session state for one configured transform (algorithm + key). */
struct cryptocop_transform_ctx{
	struct cryptocop_transform_init init;
	/* Precooked AES decryption key, derived lazily from init.key in
	 * setup_key_dl_desc(). */
	unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
	unsigned int dec_key_set:1;  /* 1 once dec_key has been computed. */

	struct cryptocop_transform_ctx *next;
};
138 
139 
/* One crypto session: an ID plus its configured transforms. */
struct cryptocop_session{
	cryptocop_session_id sid;

	struct cryptocop_transform_ctx *tfrm_ctx;  /* Singly linked transform list. */

	struct cryptocop_session *next;  /* Next in the cryptocop_sessions list. */
};
147 
/* Priority levels for jobs sent to the cryptocop.  Checksum operations from
   kernel have highest priority since TCPIP stack processing must not
   be a bottleneck.  Lower numeric value means higher priority. */
typedef enum {
	cryptocop_prio_kernel_csum = 0,
	cryptocop_prio_kernel = 1,
	cryptocop_prio_user = 2,
	cryptocop_prio_no_prios = 3 /* Number of levels; not a real priority. */
} cryptocop_queue_priority;
157 
/* FIFO of pending jobs for a single priority level. */
struct cryptocop_prio_queue{
	struct list_head jobs;
	cryptocop_queue_priority prio;
};
162 
/* A queued operation: queue linkage plus the consumer's operation and
 * the driver's internal DMA representation of it. */
struct cryptocop_prio_job{
	struct list_head node;  /* Link in a priority queue or the completed list. */
	cryptocop_queue_priority prio;

	struct cryptocop_operation *oper;      /* Consumer-supplied operation. */
	struct cryptocop_int_operation *iop;   /* Driver-built DMA representation. */
};
170 
/* Callback context for jobs submitted through the ioctl interface.
 * NOTE(review): 'processed' is presumably set when the job's callback
 * has run — confirm at the callback site (not visible in this chunk). */
struct ioctl_job_cb_ctx {
	unsigned int processed:1;
};
174 
175 
/* List of all live sessions, guarded by cryptocop_sessions_lock. */
static struct cryptocop_session *cryptocop_sessions = NULL;
spinlock_t cryptocop_sessions_lock;

/* Next Session ID to assign. */
static cryptocop_session_id next_sid = 1;

/* Pad for checksum. */
static const char csum_zero_pad[1] = {0x00};

/* Trash buffer for mem2mem operations. */
#define MEM2MEM_DISCARD_BUF_LENGTH  (512)
static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];

/* Descriptor pool for atomic-context allocations (see alloc_cdesc()). */
/* FIXME Tweak this value. */
#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE   (100)
static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
static struct cryptocop_dma_desc *descr_pool_free_list; /* Singly linked free list. */
static int descr_pool_no_free;     /* Number of descriptors currently free. */
static spinlock_t descr_pool_lock; /* Protects the free list and its count. */

/* Lock to stop cryptocop to start processing of a new operation. The holder
   of this lock MUST call cryptocop_start_job() after it is unlocked. */
spinlock_t cryptocop_process_lock;

/* Per-priority job queues and the currently executing job. */
static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
static spinlock_t cryptocop_job_queue_lock;
static struct cryptocop_prio_job *cryptocop_running_job = NULL;
static spinlock_t running_job_lock;

/* The interrupt handler appends completed jobs to this list. The scheduled
 * tasklet removes them upon sending the response to the crypto consumer. */
static struct list_head cryptocop_completed_jobs;
static spinlock_t cryptocop_completed_jobs_lock;

/* Wait queue used by the ioctl interface (name suggests callers sleep
 * here until their operation completes — confirm at the wait site). */
DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
212 
213 
/** Local functions. **/

/* Character-device file operation callbacks. */
static int cryptocop_open(struct inode *, struct file *);

static int cryptocop_release(struct inode *, struct file *);

static long cryptocop_ioctl(struct file *file,
			   unsigned int cmd, unsigned long arg);

/* Job queue handling. */
static void cryptocop_start_job(void);

static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);

static int cryptocop_job_queue_init(void);
static void cryptocop_job_queue_close(void);

/* Hash finalization padding (MD5 / SHA-1 message padding). */
static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

/* Session / transform lookup and teardown helpers. */
static int transform_ok(struct cryptocop_transform_init *tinit);

static struct cryptocop_session *get_session(cryptocop_session_id sid);

static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);

static void delete_internal_operation(struct cryptocop_int_operation *iop);

/* Derive the AES decryption key from the encryption key. */
static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned  char *key, unsigned int keylength);

/* Module init/exit. */
static int init_stream_coprocessor(void);

static void __exit exit_stream_coprocessor(void);
248 
/*#define LDEBUG*/
#ifdef LDEBUG
/* Debug build: DEBUG()/DEBUG_API() expand their argument in place and
 * assert() panics on failure; dump helpers become available. */
#define DEBUG(s) s
#define DEBUG_API(s) s
static void print_cryptocop_operation(struct cryptocop_operation *cop);
static void print_dma_descriptors(struct cryptocop_int_operation *iop);
static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
static void print_lock_status(void);
static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
#define assert(s) do{if (!(s)) panic(#s);} while(0);
#else
/* Release build: all debug hooks compile to nothing. */
#define DEBUG(s)
#define DEBUG_API(s)
#define assert(s)
#endif
264 
265 
/* Transform constants. */
#define DES_BLOCK_LENGTH   (8)   /* DES/3DES block size, bytes. */
#define AES_BLOCK_LENGTH   (16)  /* AES block size, bytes. */
#define MD5_BLOCK_LENGTH   (64)  /* MD5 input block size, bytes. */
#define SHA1_BLOCK_LENGTH  (64)  /* SHA-1 input block size, bytes. */
#define CSUM_BLOCK_LENGTH  (2)   /* Checksum works on 16-bit words. */
#define MD5_STATE_LENGTH   (16)  /* MD5 digest/state size, bytes. */
#define SHA1_STATE_LENGTH  (20)  /* SHA-1 digest/state size, bytes. */

/* The device number. */
#define CRYPTOCOP_MAJOR    (254)
#define CRYPTOCOP_MINOR    (0)
278 
279 
280 
/* File operations for the cryptocop character device
 * (major CRYPTOCOP_MAJOR).  Seeking is meaningless: noop_llseek. */
const struct file_operations cryptocop_fops = {
	.owner		= THIS_MODULE,
	.open		= cryptocop_open,
	.release	= cryptocop_release,
	.unlocked_ioctl = cryptocop_ioctl,
	.llseek		= noop_llseek,
};
288 
289 
free_cdesc(struct cryptocop_dma_desc * cdesc)290 static void free_cdesc(struct cryptocop_dma_desc *cdesc)
291 {
292 	DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
293 	kfree(cdesc->free_buf);
294 
295 	if (cdesc->from_pool) {
296 		unsigned long int flags;
297 		spin_lock_irqsave(&descr_pool_lock, flags);
298 		cdesc->next = descr_pool_free_list;
299 		descr_pool_free_list = cdesc;
300 		++descr_pool_no_free;
301 		spin_unlock_irqrestore(&descr_pool_lock, flags);
302 	} else {
303 		kfree(cdesc);
304 	}
305 }
306 
307 
alloc_cdesc(int alloc_flag)308 static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
309 {
310 	int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
311 	struct cryptocop_dma_desc *cdesc;
312 
313 	if (use_pool) {
314 		unsigned long int flags;
315 		spin_lock_irqsave(&descr_pool_lock, flags);
316 		if (!descr_pool_free_list) {
317 			spin_unlock_irqrestore(&descr_pool_lock, flags);
318 			DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
319 			return NULL;
320 		}
321 		cdesc = descr_pool_free_list;
322 		descr_pool_free_list = descr_pool_free_list->next;
323 		--descr_pool_no_free;
324 		spin_unlock_irqrestore(&descr_pool_lock, flags);
325 		cdesc->from_pool = 1;
326 	} else {
327 		cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
328 		if (!cdesc) {
329 			DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
330 			return NULL;
331 		}
332 		cdesc->from_pool = 0;
333 	}
334 	cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);
335 
336 	cdesc->next = NULL;
337 
338 	cdesc->free_buf = NULL;
339 	cdesc->dma_descr->out_eop = 0;
340 	cdesc->dma_descr->in_eop = 0;
341 	cdesc->dma_descr->intr = 0;
342 	cdesc->dma_descr->eol = 0;
343 	cdesc->dma_descr->wait = 0;
344 	cdesc->dma_descr->buf = NULL;
345 	cdesc->dma_descr->after = NULL;
346 
347 	DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
348 	return cdesc;
349 }
350 
351 
setup_descr_chain(struct cryptocop_dma_desc * cd)352 static void setup_descr_chain(struct cryptocop_dma_desc *cd)
353 {
354 	DEBUG(printk("setup_descr_chain: entering\n"));
355 	while (cd) {
356 		if (cd->next) {
357 			cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
358 		} else {
359 			cd->dma_descr->next = NULL;
360 		}
361 		cd = cd->next;
362 	}
363 	DEBUG(printk("setup_descr_chain: exit\n"));
364 }
365 
366 
367 /* Create a pad descriptor for the transform.
368  * Return -1 for error, 0 if pad created. */
create_pad_descriptor(struct cryptocop_tfrm_ctx * tc,struct cryptocop_dma_desc ** pad_desc,int alloc_flag)369 static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
370 {
371 	struct cryptocop_dma_desc        *cdesc = NULL;
372 	int                              error = 0;
373 	struct strcop_meta_out           mo = {
374 		.ciphsel = src_none,
375 		.hashsel = src_none,
376 		.csumsel = src_none
377 	};
378 	char                             *pad;
379 	size_t                           plen;
380 
381 	DEBUG(printk("create_pad_descriptor: start.\n"));
382 	/* Setup pad descriptor. */
383 
384 	DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
385 	cdesc = alloc_cdesc(alloc_flag);
386 	if (!cdesc){
387 		DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
388 		goto error_cleanup;
389 	}
390 	switch (tc->unit_no) {
391 	case src_md5:
392 		error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
393 		if (error){
394 			DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
395 			goto error_cleanup;
396 		}
397 		cdesc->free_buf = pad;
398 		mo.hashsel = src_dma;
399 		mo.hashconf = tc->hash_conf;
400 		mo.hashmode = tc->hash_mode;
401 		break;
402 	case src_sha1:
403 		error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
404 		if (error){
405 			DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
406 			goto error_cleanup;
407 		}
408 		cdesc->free_buf = pad;
409 		mo.hashsel = src_dma;
410 		mo.hashconf = tc->hash_conf;
411 		mo.hashmode = tc->hash_mode;
412 		break;
413 	case src_csum:
414 		if (tc->consumed % tc->blocklength){
415 			pad = (char*)csum_zero_pad;
416 			plen = 1;
417 		} else {
418 			pad = (char*)cdesc; /* Use any pointer. */
419 			plen = 0;
420 		}
421 		mo.csumsel = src_dma;
422 		break;
423 	}
424 	cdesc->dma_descr->wait = 1;
425 	cdesc->dma_descr->out_eop = 1; /* Since this is a pad output is pushed.  EOP is ok here since the padded unit is the only one active. */
426 	cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
427 	cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;
428 
429 	cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
430 	*pad_desc = cdesc;
431 
432 	return 0;
433 
434  error_cleanup:
435 	if (cdesc) free_cdesc(cdesc);
436 	return -1;
437 }
438 
439 
setup_key_dl_desc(struct cryptocop_tfrm_ctx * tc,struct cryptocop_dma_desc ** kd,int alloc_flag)440 static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
441 {
442 	struct cryptocop_dma_desc  *key_desc = alloc_cdesc(alloc_flag);
443 	struct strcop_meta_out     mo = {0};
444 
445 	DEBUG(printk("setup_key_dl_desc\n"));
446 
447 	if (!key_desc) {
448 		DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
449 		return -ENOMEM;
450 	}
451 
452 	/* Download key. */
453 	if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
454 		/* Precook the AES decrypt key. */
455 		if (!tc->tctx->dec_key_set){
456 			get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
457 			tc->tctx->dec_key_set = 1;
458 		}
459 		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
460 		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
461 	} else {
462 		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
463 		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
464 	}
465 	/* Setup metadata. */
466 	mo.dlkey = 1;
467 	switch (tc->tctx->init.keylen) {
468 	case 64:
469 		mo.decrypt = 0;
470 		mo.hashmode = 0;
471 		break;
472 	case 128:
473 		mo.decrypt = 0;
474 		mo.hashmode = 1;
475 		break;
476 	case 192:
477 		mo.decrypt = 1;
478 		mo.hashmode = 0;
479 		break;
480 	case 256:
481 		mo.decrypt = 1;
482 		mo.hashmode = 1;
483 		break;
484 	default:
485 		break;
486 	}
487 	mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
488 	key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
489 
490 	key_desc->dma_descr->out_eop = 1;
491 	key_desc->dma_descr->wait = 1;
492 	key_desc->dma_descr->intr = 0;
493 
494 	*kd = key_desc;
495 	return 0;
496 }
497 
setup_cipher_iv_desc(struct cryptocop_tfrm_ctx * tc,struct cryptocop_dma_desc ** id,int alloc_flag)498 static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
499 {
500 	struct cryptocop_dma_desc  *iv_desc = alloc_cdesc(alloc_flag);
501 	struct strcop_meta_out     mo = {0};
502 
503 	DEBUG(printk("setup_cipher_iv_desc\n"));
504 
505 	if (!iv_desc) {
506 		DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
507 		return -ENOMEM;
508 	}
509 	/* Download IV. */
510 	iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
511 	iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;
512 
513 	/* Setup metadata. */
514 	mo.hashsel = mo.csumsel = src_none;
515 	mo.ciphsel = src_dma;
516 	mo.ciphconf = tc->ciph_conf;
517 	mo.cbcmode = tc->cbcmode;
518 
519 	iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
520 
521 	iv_desc->dma_descr->out_eop = 0;
522 	iv_desc->dma_descr->wait = 1;
523 	iv_desc->dma_descr->intr = 0;
524 
525 	*id = iv_desc;
526 	return 0;
527 }
528 
/* Map the output length of the transform to operation output starting on the inject index. */
/* Builds the DMA in descriptors that receive tc->produced bytes of unit
 * output and scatter them into operation->tfrm_op.outdata starting at
 * byte offset tc->tcfg->inject_ix.  On success *id points at the head
 * of the new chain and the last descriptor carries the sync bit.
 * Returns 0, -EINVAL (outdata too small) or -ENOMEM; on error all
 * descriptors allocated here are freed. */
static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
{
	int                        err = 0;
	struct cryptocop_dma_desc  head = {0};  /* Dummy head; real chain starts at head.next. */
	struct cryptocop_dma_desc  *outdesc = &head;
	size_t                     iov_offset = 0;
	size_t                     out_ix = 0;
	int                        outiov_ix = 0;
	struct strcop_meta_in      mi = {0};

	size_t                     out_length = tc->produced;
	int                        rem_length;
	int                        dlength;

	assert(out_length != 0);
	/* The whole result must fit in outdata at the inject offset. */
	if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
		return -EINVAL;
	}
	/* Traverse the out iovec until the result inject index is reached. */
	while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
		out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
		outiov_ix++;
	}
	if (outiov_ix >= operation->tfrm_op.outcount){
		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
		return -EINVAL;
	}
	iov_offset = tc->tcfg->inject_ix - out_ix;
	mi.dmasel = tc->unit_no;

	/* Setup the output descriptors. */
	while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
		outdesc->next = alloc_cdesc(alloc_flag);
		if (!outdesc->next) {
			DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
			err = -ENOMEM;
			goto error_cleanup;
		}
		outdesc = outdesc->next;
		/* Fill as much of the current iovec entry as possible. */
		rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
		dlength = (out_length < rem_length) ? out_length : rem_length;

		DEBUG(printk("create_input_descriptors:\n"
			     "outiov_ix=%d, rem_length=%d, dlength=%d\n"
			     "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
			     "outcount=%d, outiov_ix=%d\n",
			     outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));

		outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
		outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
		outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

		out_length -= dlength;
		iov_offset += dlength;
		/* Advance to the next iovec entry once this one is full. */
		if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
			iov_offset = 0;
			++outiov_ix;
		}
	}
	if (out_length > 0){
		DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));
		err = -EINVAL;
		goto error_cleanup;
	}
	/* Set sync in last descriptor. */
	mi.sync = 1;
	outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

	*id = head.next;
	return 0;

 error_cleanup:
	/* Free everything allocated in this call. */
	while (head.next) {
		outdesc = head.next->next;
		free_cdesc(head.next);
		head.next = outdesc;
	}
	return err;
}
610 
611 
/*
 * Build DMA out descriptors covering 'desc_len' bytes of operation
 * input, consuming the indata iovec from (*iniov_ix, *iniov_offset) and
 * advancing both.  New descriptors are appended after *current_out_cdesc,
 * which is updated to the new tail; each gets the metadata *meta_out and
 * the last one additionally the 'wait' bit.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL when
 * indata is exhausted before desc_len bytes are covered.  Descriptors
 * already linked into the caller's list are left for the caller to free.
 */
static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
{
	while (desc_len != 0) {
		struct cryptocop_dma_desc  *cdesc;
		int                        rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
		int                        dlength = (desc_len < rem_length) ? desc_len : rem_length;

		cdesc = alloc_cdesc(alloc_flag);
		if (!cdesc) {
			DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
			return -ENOMEM;
		}
		(*current_out_cdesc)->next = cdesc;
		(*current_out_cdesc) = cdesc;

		cdesc->free_buf = NULL;

		cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
		cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;

		assert(desc_len >= dlength);
		desc_len -= dlength;
		*iniov_offset += dlength;
		if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
			*iniov_offset = 0;
			++(*iniov_ix);
			/* Was '> incount', which let one out-of-bounds
			 * indata[] access happen before the error fired.
			 * Fail as soon as the iovec is exhausted while bytes
			 * are still pending. */
			if ((*iniov_ix >= operation->tfrm_op.incount) && (desc_len != 0)) {
				DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
				return  -EINVAL;
			}
		}
		cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
	} /* while (desc_len != 0) */
	/* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
	(*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */

	return 0;
}
650 
651 
/* Append this transform's descriptors to the operation's DMA lists:
 * - pad descriptors (tc->pad_descs) are moved onto the DMA out list;
 * - for mem2mem (unit src_dma) discard descriptors are first put on the
 *   DMA in list so the tc->start_ix bytes preceding the requested data
 *   are dumped into mem2mem_discard_buf;
 * - finally the output descriptors built by create_input_descriptors()
 *   are appended to the DMA in list.
 * Does nothing when the transform is unused (tc->tcfg == NULL).
 * Returns 0 on success or a negative errno. */
static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
{
	DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
	if (tc->tcfg) {
		int                        failed = 0;
		struct cryptocop_dma_desc  *idescs = NULL;
		DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
		if (tc->pad_descs) {
			DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
			/* Splice the pre-built pad chain onto the out list tail. */
			while (tc->pad_descs) {
				DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
				(*current_out_cdesc)->next = tc->pad_descs;
				tc->pad_descs = tc->pad_descs->next;
				(*current_out_cdesc) = (*current_out_cdesc)->next;
			}
		}

		/* Setup and append output descriptors to DMA in list. */
		if (tc->unit_no == src_dma){
			/* mem2mem.  Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
			struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
			unsigned int start_ix = tc->start_ix;
			while (start_ix){
				/* Discard in chunks of at most the trash buffer size. */
				unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;
				(*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
				if (!(*current_in_cdesc)->next){
					DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
					return -ENOMEM;
				}
				(*current_in_cdesc) = (*current_in_cdesc)->next;
				(*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
				(*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
				(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
				start_ix -= desclen;
			}
			/* Re-tag the last discard descriptor with the sync bit. */
			mi.sync = 1;
			(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
		}

		failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
		if (failed){
			DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
			return failed;
		}
		DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
		/* Splice the freshly built output chain onto the in list tail. */
		while (idescs) {
			DEBUG(printk("append descriptor 0x%p\n", idescs));
			(*current_in_cdesc)->next = idescs;
			idescs = idescs->next;
			(*current_in_cdesc) = (*current_in_cdesc)->next;
		}
	}
	return 0;
}
706 
707 
708 
cryptocop_setup_dma_list(struct cryptocop_operation * operation,struct cryptocop_int_operation ** int_op,int alloc_flag)709 static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
710 {
711 	struct cryptocop_session *sess;
712 	struct cryptocop_transform_ctx *tctx;
713 
714 	struct cryptocop_tfrm_ctx digest_ctx = {
715 		.previous_src = src_none,
716 		.current_src = src_none,
717 		.start_ix = 0,
718 		.requires_padding = 1,
719 		.strict_block_length = 0,
720 		.hash_conf = 0,
721 		.hash_mode = 0,
722 		.ciph_conf = 0,
723 		.cbcmode = 0,
724 		.decrypt = 0,
725 		.consumed = 0,
726 		.produced = 0,
727 		.pad_descs = NULL,
728 		.active = 0,
729 		.done = 0,
730 		.prev_src = NULL,
731 		.curr_src = NULL,
732 		.tcfg = NULL};
733 	struct cryptocop_tfrm_ctx cipher_ctx = {
734 		.previous_src = src_none,
735 		.current_src = src_none,
736 		.start_ix = 0,
737 		.requires_padding = 0,
738 		.strict_block_length = 1,
739 		.hash_conf = 0,
740 		.hash_mode = 0,
741 		.ciph_conf = 0,
742 		.cbcmode = 0,
743 		.decrypt = 0,
744 		.consumed = 0,
745 		.produced = 0,
746 		.pad_descs = NULL,
747 		.active = 0,
748 		.done = 0,
749 		.prev_src = NULL,
750 		.curr_src = NULL,
751 		.tcfg = NULL};
752 	struct cryptocop_tfrm_ctx csum_ctx = {
753 		.previous_src = src_none,
754 		.current_src = src_none,
755 		.start_ix = 0,
756 		.blocklength = 2,
757 		.requires_padding = 1,
758 		.strict_block_length = 0,
759 		.hash_conf = 0,
760 		.hash_mode = 0,
761 		.ciph_conf = 0,
762 		.cbcmode = 0,
763 		.decrypt = 0,
764 		.consumed = 0,
765 		.produced = 0,
766 		.pad_descs = NULL,
767 		.active = 0,
768 		.done = 0,
769 		.tcfg = NULL,
770 		.prev_src = NULL,
771 		.curr_src = NULL,
772 		.unit_no = src_csum};
773 	struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;
774 
775 	unsigned int indata_ix = 0;
776 
777 	/* iovec accounting. */
778 	int iniov_ix = 0;
779 	int iniov_offset = 0;
780 
781 	/* Operation descriptor cfg traversal pointer. */
782 	struct cryptocop_desc *odsc;
783 
784 	int failed = 0;
785 	/* List heads for allocated descriptors. */
786 	struct cryptocop_dma_desc out_cdesc_head = {0};
787 	struct cryptocop_dma_desc in_cdesc_head = {0};
788 
789 	struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
790 	struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;
791 
792 	struct cryptocop_tfrm_ctx *output_tc = NULL;
793 	void                      *iop_alloc_ptr;
794 
795 	assert(operation != NULL);
796 	assert(int_op != NULL);
797 
798 	DEBUG(printk("cryptocop_setup_dma_list: start\n"));
799 	DEBUG(print_cryptocop_operation(operation));
800 
801 	sess = get_session(operation->sid);
802 	if (!sess) {
803 		DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
804 		failed = -EINVAL;
805 		goto error_cleanup;
806 	}
807 	iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
808 	if (!iop_alloc_ptr) {
809 		DEBUG_API(printk("cryptocop_setup_dma_list:  kmalloc cryptocop_int_operation\n"));
810 		failed = -ENOMEM;
811 		goto error_cleanup;
812 	}
813 	(*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
814 	DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
815 	(*int_op)->alloc_ptr = iop_alloc_ptr;
816 	DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));
817 
818 	(*int_op)->sid = operation->sid;
819 	(*int_op)->cdesc_out = NULL;
820 	(*int_op)->cdesc_in = NULL;
821 	(*int_op)->tdes_mode = cryptocop_3des_ede;
822 	(*int_op)->csum_mode = cryptocop_csum_le;
823 	(*int_op)->ddesc_out = NULL;
824 	(*int_op)->ddesc_in = NULL;
825 
826 	/* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
827 	if (!tcfg) {
828 		DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
829 		failed = -EINVAL;
830 		goto error_cleanup;
831 	}
832 	while (tcfg) {
833 		tctx = get_transform_ctx(sess, tcfg->tid);
834 		if (!tctx) {
835 			DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
836 			failed = -EINVAL;
837 			goto error_cleanup;
838 		}
839 		if (tcfg->inject_ix > operation->tfrm_op.outlen){
840 			DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
841 			failed = -EINVAL;
842 			goto error_cleanup;
843 		}
844 		switch (tctx->init.alg){
845 		case cryptocop_alg_mem2mem:
846 			if (cipher_ctx.tcfg != NULL){
847 				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
848 				failed = -EINVAL;
849 				goto error_cleanup;
850 			}
851 			/* mem2mem is handled as a NULL cipher. */
852 			cipher_ctx.cbcmode = 0;
853 			cipher_ctx.decrypt = 0;
854 			cipher_ctx.blocklength = 1;
855 			cipher_ctx.ciph_conf = 0;
856 			cipher_ctx.unit_no = src_dma;
857 			cipher_ctx.tcfg = tcfg;
858 			cipher_ctx.tctx = tctx;
859 			break;
860 		case cryptocop_alg_des:
861 		case cryptocop_alg_3des:
862 		case cryptocop_alg_aes:
863 			/* cipher */
864 			if (cipher_ctx.tcfg != NULL){
865 				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
866 				failed = -EINVAL;
867 				goto error_cleanup;
868 			}
869 			cipher_ctx.tcfg = tcfg;
870 			cipher_ctx.tctx = tctx;
871 			if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){
872 				cipher_ctx.decrypt = 1;
873 			}
874 			switch (tctx->init.cipher_mode) {
875 			case cryptocop_cipher_mode_ecb:
876 				cipher_ctx.cbcmode = 0;
877 				break;
878 			case cryptocop_cipher_mode_cbc:
879 				cipher_ctx.cbcmode = 1;
880 				break;
881 			default:
882 				DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode));
883 				failed = -EINVAL;
884 				goto error_cleanup;
885 			}
886 			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode));
887 			switch (tctx->init.alg){
888 			case cryptocop_alg_des:
889 				cipher_ctx.ciph_conf = 0;
890 				cipher_ctx.unit_no = src_des;
891 				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
892 				break;
893 			case cryptocop_alg_3des:
894 				cipher_ctx.ciph_conf = 1;
895 				cipher_ctx.unit_no = src_des;
896 				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
897 				break;
898 			case cryptocop_alg_aes:
899 				cipher_ctx.ciph_conf = 2;
900 				cipher_ctx.unit_no = src_aes;
901 				cipher_ctx.blocklength = AES_BLOCK_LENGTH;
902 				break;
903 			default:
904 				panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg);
905 			}
906 			(*int_op)->tdes_mode = tctx->init.tdes_mode;
907 			break;
908 		case cryptocop_alg_md5:
909 		case cryptocop_alg_sha1:
910 			/* digest */
911 			if (digest_ctx.tcfg != NULL){
912 				DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
913 				failed = -EINVAL;
914 				goto error_cleanup;
915 			}
916 			digest_ctx.tcfg = tcfg;
917 			digest_ctx.tctx = tctx;
918 			digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */
919 			switch (tctx->init.alg){
920 			case cryptocop_alg_md5:
921 				digest_ctx.blocklength = MD5_BLOCK_LENGTH;
922 				digest_ctx.unit_no = src_md5;
923 				digest_ctx.hash_conf = 1; /* 1 => MD-5 */
924 				break;
925 			case cryptocop_alg_sha1:
926 				digest_ctx.blocklength = SHA1_BLOCK_LENGTH;
927 				digest_ctx.unit_no = src_sha1;
928 				digest_ctx.hash_conf = 0; /* 0 => SHA-1 */
929 				break;
930 			default:
931 				panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
932 			}
933 			break;
934 		case cryptocop_alg_csum:
935 			/* digest */
936 			if (csum_ctx.tcfg != NULL){
937 				DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
938 				failed = -EINVAL;
939 				goto error_cleanup;
940 			}
941 			(*int_op)->csum_mode = tctx->init.csum_mode;
942 			csum_ctx.tcfg = tcfg;
943 			csum_ctx.tctx = tctx;
944 			break;
945 		default:
946 			/* no algorithm. */
947 			DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid));
948 			failed = -EINVAL;
949 			goto error_cleanup;
950 		}
951 		tcfg = tcfg->next;
952 	}
953 	/* Download key if a cipher is used. */
954 	if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){
955 		struct cryptocop_dma_desc  *key_desc = NULL;
956 
957 		failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag);
958 		if (failed) {
959 			DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
960 			goto error_cleanup;
961 		}
962 		current_out_cdesc->next = key_desc;
963 		current_out_cdesc = key_desc;
964 		indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf);
965 
966 		/* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
967 		if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) {
968 			struct cryptocop_dma_desc  *iv_desc = NULL;
969 
970 			DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));
971 
972 			failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag);
973 			if (failed) {
974 				DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
975 				goto error_cleanup;
976 			}
977 			current_out_cdesc->next = iv_desc;
978 			current_out_cdesc = iv_desc;
979 			indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf);
980 		}
981 	}
982 
983 	/* Process descriptors. */
984 	odsc = operation->tfrm_op.desc;
985 	while (odsc) {
986 		struct cryptocop_desc_cfg   *dcfg = odsc->cfg;
987 		struct strcop_meta_out      meta_out = {0};
988 		size_t                      desc_len = odsc->length;
989 		int                         active_count, eop_needed_count;
990 
991 		output_tc = NULL;
992 
993 		DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));
994 
995 		while (dcfg) {
996 			struct cryptocop_tfrm_ctx  *tc = NULL;
997 
998 			DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
999 			/* Get the local context for the transform and mark it as the output unit if it produces output. */
1000 			if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){
1001 				tc = &digest_ctx;
1002 			} else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){
1003 				tc = &cipher_ctx;
1004 			} else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){
1005 				tc = &csum_ctx;
1006 			}
1007 			if (!tc) {
1008 				DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid));
1009 				failed = -EINVAL;
1010 				goto error_cleanup;
1011 			}
1012 			if (tc->done) {
1013 				DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid));
1014 				failed = -EINVAL;
1015 				goto error_cleanup;
1016 			}
1017 			if (!tc->active) {
1018 				tc->start_ix = indata_ix;
1019 				tc->active = 1;
1020 			}
1021 
1022 			tc->previous_src = tc->current_src;
1023 			tc->prev_src = tc->curr_src;
1024 			/* Map source unit id to DMA source config. */
1025 			switch (dcfg->src){
1026 			case cryptocop_source_dma:
1027 				tc->current_src = src_dma;
1028 				break;
1029 			case cryptocop_source_des:
1030 				tc->current_src = src_des;
1031 				break;
1032 			case cryptocop_source_3des:
1033 				tc->current_src = src_des;
1034 				break;
1035 			case cryptocop_source_aes:
1036 				tc->current_src = src_aes;
1037 				break;
1038 			case cryptocop_source_md5:
1039 			case cryptocop_source_sha1:
1040 			case cryptocop_source_csum:
1041 			case cryptocop_source_none:
1042 			default:
1043 				/* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units.
1044 				 */
1045 				DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src));
1046 				failed = -EINVAL;
1047 				goto error_cleanup;
1048 			}
1049 			if (tc->current_src != src_dma) {
1050 				/* Find the unit we are sourcing from. */
1051 				if (digest_ctx.unit_no == tc->current_src){
1052 					tc->curr_src = &digest_ctx;
1053 				} else if (cipher_ctx.unit_no == tc->current_src){
1054 					tc->curr_src = &cipher_ctx;
1055 				} else if (csum_ctx.unit_no == tc->current_src){
1056 					tc->curr_src = &csum_ctx;
1057 				}
1058 				if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){
1059 					DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no));
1060 					failed = -EINVAL;
1061 					goto error_cleanup;
1062 				}
1063 			} else {
1064 				tc->curr_src = NULL;
1065 			}
1066 
1067 			/* Detect source switch. */
1068 			DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_srv=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src));
1069 			if (tc->active && (tc->current_src != tc->previous_src)) {
1070 				/* Only allow source switch when both the old source unit and the new one have
1071 				 * no pending data to process (i.e. the consumed length must be a multiple of the
1072 				 * transform blocklength). */
1073 				/* Note: if the src == NULL we are actually sourcing from DMA out. */
1074 				if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) ||
1075 				    ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength)))
1076 				{
1077 					DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN));
1078 					failed = -EINVAL;
1079 					goto error_cleanup;
1080 				}
1081 			}
1082 			/* Detect unit deactivation. */
1083 			if (dcfg->last) {
1084 				/* Length check of this is handled below. */
1085 				tc->done = 1;
1086 			}
1087 			dcfg = dcfg->next;
1088 		} /* while (dcfg) */
1089 		DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));
1090 
1091 		if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){
1092 			DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no));
1093 			failed = -EINVAL;
1094 			goto error_cleanup;
1095 		}
1096 		if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){
1097 			DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no));
1098 			failed = -EINVAL;
1099 			goto error_cleanup;
1100 		}
1101 		if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){
1102 			DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", csum_ctx.curr_src->unit_no));
1103 			failed = -EINVAL;
1104 			goto error_cleanup;
1105 		}
1106 
1107 		/* Update consumed and produced lengths.
1108 
1109 		   The consumed length accounting here is actually cheating.  If a unit source from DMA (or any
1110 		   other unit that process data in blocks of one octet) it is correct, but if it source from a
1111 		   block processing unit, i.e. a cipher, it will be temporarily incorrect at some times.  However
1112 		   since it is only allowed--by the HW--to change source to or from a block processing unit at times where that
1113 		   unit has processed an exact multiple of its block length the end result will be correct.
1114 		   Beware that if the source change restriction change this code will need to be (much) reworked.
1115 		*/
1116 		DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len));
1117 
1118 		if (csum_ctx.active) {
1119 			csum_ctx.consumed += desc_len;
1120 			if (csum_ctx.done) {
1121 				csum_ctx.produced = 2;
1122 			}
1123 			DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength));
1124 		}
1125 		if (digest_ctx.active) {
1126 			digest_ctx.consumed += desc_len;
1127 			if (digest_ctx.done) {
1128 				if (digest_ctx.unit_no == src_md5) {
1129 					digest_ctx.produced = MD5_STATE_LENGTH;
1130 				} else {
1131 					digest_ctx.produced = SHA1_STATE_LENGTH;
1132 				}
1133 			}
1134 			DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength));
1135 		}
1136 		if (cipher_ctx.active) {
1137 			/* Ciphers are allowed only to source from DMA out.  That is filtered above. */
1138 			assert(cipher_ctx.current_src == src_dma);
1139 			cipher_ctx.consumed += desc_len;
1140 			cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength);
1141 			if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){
1142 				cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */
1143 			}
1144 			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength));
1145 		}
1146 
1147 		/* Setup the DMA out descriptors. */
1148 		/* Configure the metadata. */
1149 		active_count = 0;
1150 		eop_needed_count = 0;
1151 		if (cipher_ctx.active) {
1152 			++active_count;
1153 			if (cipher_ctx.unit_no == src_dma){
1154 				/* mem2mem */
1155 				meta_out.ciphsel = src_none;
1156 			} else {
1157 				meta_out.ciphsel = cipher_ctx.current_src;
1158 			}
1159 			meta_out.ciphconf = cipher_ctx.ciph_conf;
1160 			meta_out.cbcmode = cipher_ctx.cbcmode;
1161 			meta_out.decrypt = cipher_ctx.decrypt;
1162 			DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt));
1163 			if (cipher_ctx.done) ++eop_needed_count;
1164 		} else {
1165 			meta_out.ciphsel = src_none;
1166 		}
1167 
1168 		if (digest_ctx.active) {
1169 			++active_count;
1170 			meta_out.hashsel = digest_ctx.current_src;
1171 			meta_out.hashconf = digest_ctx.hash_conf;
1172 			meta_out.hashmode = 0; /* Explicit mode is not used here. */
1173 			DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode));
1174 			if (digest_ctx.done) {
1175 				assert(digest_ctx.pad_descs == NULL);
1176 				failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag);
1177 				if (failed) {
1178 					DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
1179 					goto error_cleanup;
1180 				}
1181 			}
1182 		} else {
1183 			meta_out.hashsel = src_none;
1184 		}
1185 
1186 		if (csum_ctx.active) {
1187 			++active_count;
1188 			meta_out.csumsel = csum_ctx.current_src;
1189 			if (csum_ctx.done) {
1190 				assert(csum_ctx.pad_descs == NULL);
1191 				failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag);
1192 				if (failed) {
1193 					DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
1194 					goto error_cleanup;
1195 				}
1196 			}
1197 		} else {
1198 			meta_out.csumsel = src_none;
1199 		}
1200 		DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count));
1201 		/* Setup DMA out descriptors for the indata. */
1202 		failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, &current_out_cdesc, &meta_out, alloc_flag);
1203 		if (failed) {
1204 			DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed));
1205 			goto error_cleanup;
1206 		}
1207 		/* Setup out EOP.  If there are active units that are not done here they cannot get an EOP
1208 		 * so we ust setup a zero length descriptor to DMA to signal EOP only to done units.
1209 		 * If there is a pad descriptor EOP for the padded unit will be EOPed by it.
1210 		 */
1211 		assert(active_count >= eop_needed_count);
1212 		assert((eop_needed_count == 0) || (eop_needed_count == 1));
1213 		if (eop_needed_count) {
1214 			/* This means that the bulk operation (cipher/m2m) is terminated. */
1215 			if (active_count > 1) {
1216 				/* Use zero length EOP descriptor. */
1217 				struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
1218 				struct strcop_meta_out    ed_mo = {0};
1219 				if (!ed) {
1220 					DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
1221 					failed = -ENOMEM;
1222 					goto error_cleanup;
1223 				}
1224 
1225 				assert(cipher_ctx.active && cipher_ctx.done);
1226 
1227 				if (cipher_ctx.unit_no == src_dma){
1228 					/* mem2mem */
1229 					ed_mo.ciphsel = src_none;
1230 				} else {
1231 					ed_mo.ciphsel = cipher_ctx.current_src;
1232 				}
1233 				ed_mo.ciphconf = cipher_ctx.ciph_conf;
1234 				ed_mo.cbcmode = cipher_ctx.cbcmode;
1235 				ed_mo.decrypt = cipher_ctx.decrypt;
1236 
1237 				ed->free_buf = NULL;
1238 				ed->dma_descr->wait = 1;
1239 				ed->dma_descr->out_eop = 1;
1240 
1241 				ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */
1242 				ed->dma_descr->after = ed->dma_descr->buf;
1243 				ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo);
1244 				current_out_cdesc->next = ed;
1245 				current_out_cdesc = ed;
1246 			} else {
1247 				/* Set EOP in the current out descriptor since the only active module is
1248 				 * the one needing the EOP. */
1249 
1250 				current_out_cdesc->dma_descr->out_eop = 1;
1251 			}
1252 		}
1253 
1254 		if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0;
1255 		if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0;
1256 		if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0;
1257 		indata_ix += odsc->length;
1258 		odsc = odsc->next;
1259 	} /* while (odsc) */ /* Process descriptors. */
1260 	DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
1261 	if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){
1262 		DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
1263 		failed = -EINVAL;
1264 		goto error_cleanup;
1265 	}
1266 	if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){
1267 		DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
1268 		failed = -EINVAL;
1269 		goto error_cleanup;
1270 	}
1271 	if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){
1272 		DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
1273 		failed = -EINVAL;
1274 		goto error_cleanup;
1275 	}
1276 
1277 	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &cipher_ctx, alloc_flag);
1278 	if (failed){
1279 		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1280 		goto error_cleanup;
1281 	}
1282 	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &digest_ctx, alloc_flag);
1283 	if (failed){
1284 		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1285 		goto error_cleanup;
1286 	}
1287 	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &csum_ctx, alloc_flag);
1288 	if (failed){
1289 		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1290 		goto error_cleanup;
1291 	}
1292 
1293 	DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op));
1294 	(*int_op)->cdesc_out = out_cdesc_head.next;
1295 	(*int_op)->cdesc_in = in_cdesc_head.next;
1296 	DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in));
1297 
1298 	setup_descr_chain(out_cdesc_head.next);
1299 	setup_descr_chain(in_cdesc_head.next);
1300 
1301 	/* Last but not least: mark the last DMA in descriptor for a INTR and EOL and the the
1302 	 * last DMA out descriptor for EOL.
1303 	 */
1304 	current_in_cdesc->dma_descr->intr = 1;
1305 	current_in_cdesc->dma_descr->eol = 1;
1306 	current_out_cdesc->dma_descr->eol = 1;
1307 
1308 	/* Setup DMA contexts. */
1309 	(*int_op)->ctx_out.next = NULL;
1310 	(*int_op)->ctx_out.eol = 1;
1311 	(*int_op)->ctx_out.intr = 0;
1312 	(*int_op)->ctx_out.store_mode = 0;
1313 	(*int_op)->ctx_out.en = 0;
1314 	(*int_op)->ctx_out.dis = 0;
1315 	(*int_op)->ctx_out.md0 = 0;
1316 	(*int_op)->ctx_out.md1 = 0;
1317 	(*int_op)->ctx_out.md2 = 0;
1318 	(*int_op)->ctx_out.md3 = 0;
1319 	(*int_op)->ctx_out.md4 = 0;
1320 	(*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr);
1321 	(*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */
1322 
1323 	(*int_op)->ctx_in.next = NULL;
1324 	(*int_op)->ctx_in.eol = 1;
1325 	(*int_op)->ctx_in.intr = 0;
1326 	(*int_op)->ctx_in.store_mode = 0;
1327 	(*int_op)->ctx_in.en = 0;
1328 	(*int_op)->ctx_in.dis = 0;
1329 	(*int_op)->ctx_in.md0 = 0;
1330 	(*int_op)->ctx_in.md1 = 0;
1331 	(*int_op)->ctx_in.md2 = 0;
1332 	(*int_op)->ctx_in.md3 = 0;
1333 	(*int_op)->ctx_in.md4 = 0;
1334 
1335 	(*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr);
1336 	(*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */
1337 
1338 	DEBUG(printk("cryptocop_setup_dma_list: done\n"));
1339 	return 0;
1340 
1341 error_cleanup:
1342 	{
1343 		/* Free all allocated resources. */
1344 		struct cryptocop_dma_desc *tmp_cdesc;
1345 		while (digest_ctx.pad_descs){
1346 			tmp_cdesc = digest_ctx.pad_descs->next;
1347 			free_cdesc(digest_ctx.pad_descs);
1348 			digest_ctx.pad_descs = tmp_cdesc;
1349 		}
1350 		while (csum_ctx.pad_descs){
1351 			tmp_cdesc = csum_ctx.pad_descs->next;
1352 			free_cdesc(csum_ctx.pad_descs);
1353 			csum_ctx.pad_descs = tmp_cdesc;
1354 		}
1355 		assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */
1356 
1357 		if (*int_op != NULL) delete_internal_operation(*int_op);
1358 	}
1359 	DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed));
1360 	return failed;
1361 }
1362 
1363 
delete_internal_operation(struct cryptocop_int_operation * iop)1364 static void delete_internal_operation(struct cryptocop_int_operation *iop)
1365 {
1366 	void                      *ptr = iop->alloc_ptr;
1367 	struct cryptocop_dma_desc *cd = iop->cdesc_out;
1368 	struct cryptocop_dma_desc *next;
1369 
1370 	DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr));
1371 
1372 	while (cd) {
1373 		next = cd->next;
1374 		free_cdesc(cd);
1375 		cd = next;
1376 	}
1377 	cd = iop->cdesc_in;
1378 	while (cd) {
1379 		next = cd->next;
1380 		free_cdesc(cd);
1381 		cd = next;
1382 	}
1383 	kfree(ptr);
1384 }
1385 
1386 #define MD5_MIN_PAD_LENGTH (9)
1387 #define MD5_PAD_LENGTH_FIELD_LENGTH (8)
1388 
/* Build an MD5 padding block for a message of hashed_length octets:
 * a 0x80 octet, zero fill, and the message length in bits stored
 * little endian in the last eight octets (the field is zero filled by
 * kzalloc, so only the non-zero low octets are written).
 *
 * On success *pad and *pad_length are set and 0 is returned; the
 * caller owns the returned buffer and must kfree it.  Returns -ENOMEM
 * on allocation failure.
 */
static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
{
	unsigned long long int  bits = hashed_length << 3;
	size_t                  plen;
	unsigned char           *buf;
	int                     ix;

	plen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH);
	/* There must be room for the 0x80 marker plus the 8 length octets. */
	if (plen < MD5_MIN_PAD_LENGTH)
		plen += MD5_BLOCK_LENGTH;

	buf = kzalloc(plen, alloc_flag);
	if (!buf)
		return -ENOMEM;

	buf[0] = 0x80;

	DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bits, hashed_length));

	/* MD5 appends the bit count little endian: low octet first. */
	ix = plen - MD5_PAD_LENGTH_FIELD_LENGTH;
	for (; bits != 0; bits >>= 8)
		buf[ix++] = bits & 0xff;

	*pad = (char*)buf;
	*pad_length = plen;

	return 0;
}
1416 
1417 #define SHA1_MIN_PAD_LENGTH (9)
1418 #define SHA1_PAD_LENGTH_FIELD_LENGTH (8)
1419 
/* Build a SHA-1 padding block for a message of hashed_length octets:
 * a 0x80 octet, zero fill, and the message length in bits stored big
 * endian in the trailing octets (kzalloc zero fills, so only the
 * non-zero low octets are written, from the end backwards).
 *
 * On success *pad and *pad_length are set and 0 is returned; the
 * caller owns the returned buffer and must kfree it.  Returns -ENOMEM
 * on allocation failure.
 */
static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
{
	unsigned long long int  bits = hashed_length << 3;
	size_t                  plen;
	unsigned char           *buf;
	int                     ix;

	plen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH);
	/* There must be room for the 0x80 marker plus the 8 length octets. */
	if (plen < SHA1_MIN_PAD_LENGTH)
		plen += SHA1_BLOCK_LENGTH;

	buf = kzalloc(plen, alloc_flag);
	if (!buf)
		return -ENOMEM;

	buf[0] = 0x80;

	DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bits, hashed_length));

	/* SHA-1 appends the bit count big endian: low octet goes last. */
	ix = plen - 1;
	for (; bits != 0; bits >>= 8)
		buf[ix--] = bits & 0xff;

	*pad = (char*)buf;
	*pad_length = plen;

	return 0;
}
1447 
1448 
transform_ok(struct cryptocop_transform_init * tinit)1449 static int transform_ok(struct cryptocop_transform_init *tinit)
1450 {
1451 	switch (tinit->alg){
1452 	case cryptocop_alg_csum:
1453 		switch (tinit->csum_mode){
1454 		case cryptocop_csum_le:
1455 		case cryptocop_csum_be:
1456 			break;
1457 		default:
1458 			DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
1459 			return -EINVAL;
1460 		}
1461 	case cryptocop_alg_mem2mem:
1462 	case cryptocop_alg_md5:
1463 	case cryptocop_alg_sha1:
1464 		if (tinit->keylen != 0) {
1465 			DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen));
1466 			return -EINVAL; /* This check is a bit strict. */
1467 		}
1468 		break;
1469 	case cryptocop_alg_des:
1470 		if (tinit->keylen != 64) {
1471 			DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen));
1472 			return -EINVAL;
1473 		}
1474 		break;
1475 	case cryptocop_alg_3des:
1476 		if (tinit->keylen != 192) {
1477 			DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen));
1478 			return -EINVAL;
1479 		}
1480 		break;
1481 	case cryptocop_alg_aes:
1482 		if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) {
1483 			DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen));
1484 			return -EINVAL;
1485 		}
1486 		break;
1487 	case cryptocop_no_alg:
1488 	default:
1489 		DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg));
1490 		return -EINVAL;
1491 	}
1492 
1493 	switch (tinit->alg){
1494 	case cryptocop_alg_des:
1495 	case cryptocop_alg_3des:
1496 	case cryptocop_alg_aes:
1497 		if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL;
1498 	default:
1499 		 break;
1500 	}
1501 	return 0;
1502 }
1503 
1504 
/* Create a new session from the list of transform initializations in
 * tinit.  Every transform is validated with transform_ok() and each
 * must carry a unique tid.  On success the new session is prepended to
 * the global session list, its id is stored in *sid and 0 is returned;
 * on failure a negative errno is returned and nothing is allocated.
 */
int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag)
{
	struct cryptocop_session         *sess;
	struct cryptocop_transform_init  *tfrm_in = tinit;
	struct cryptocop_transform_init  *tmp_in;
	int                              no_tfrms = 0;
	int                              i;
	unsigned long int                flags;

	init_stream_coprocessor(); /* For safety if we are called early */

	/* Validate all transforms and count them. */
	while (tfrm_in){
		int err;
		++no_tfrms;
		if ((err = transform_ok(tfrm_in))) {
			DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
			return err;
		}
		tfrm_in = tfrm_in->next;
	}
	if (0 == no_tfrms) {
		DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
		return -EINVAL;
	}

	sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag);
	if (!sess){
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
		return -ENOMEM;
	}

	/* kmalloc_array() checks the no_tfrms * size multiplication for
	 * overflow, unlike a plain kmalloc(n * size). */
	sess->tfrm_ctx = kmalloc_array(no_tfrms, sizeof(struct cryptocop_transform_ctx), alloc_flag);
	if (!sess->tfrm_ctx) {
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
		kfree(sess);
		return -ENOMEM;
	}

	tfrm_in = tinit;
	for (i = 0; i < no_tfrms; i++){
		/* Reject duplicate transform ids. */
		tmp_in = tfrm_in->next;
		while (tmp_in){
			if (tmp_in->tid == tfrm_in->tid) {
				DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
				kfree(sess->tfrm_ctx);
				kfree(sess);
				return -EINVAL;
			}
			tmp_in = tmp_in->next;
		}
		memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init));
		sess->tfrm_ctx[i].dec_key_set = 0;
		/* Link the contexts; the last link is patched to NULL below. */
		sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1;

		tfrm_in = tfrm_in->next;
	}
	sess->tfrm_ctx[i-1].next = NULL;

	/* Allocate a session id and publish the session. */
	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess->sid = next_sid;
	next_sid++;
	/* TODO If we are really paranoid we should do duplicate check to handle sid wraparound.
	 *      OTOH 2^64 is a really large number of session. */
	if (next_sid == 0) next_sid = 1;

	/* Prepend to session list. */
	sess->next = cryptocop_sessions;
	cryptocop_sessions = sess;
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
	*sid = sess->sid;
	return 0;
}
1577 
1578 
/* Tear down the session with id sid: unlink it from the session list,
 * cancel any jobs still queued for it (each job's callback is invoked
 * with operation_status set to -EAGAIN and its internal operation is
 * freed), scrub all keying material and free the session memory.
 *
 * Returns 0 on success, -EINVAL if no session with that id exists.
 */
int cryptocop_free_session(cryptocop_session_id sid)
{
	struct cryptocop_transform_ctx    *tc;
	struct cryptocop_session          *sess = NULL;
	struct cryptocop_session          *psess = NULL;
	unsigned long int                 flags;
	int                               i;
	LIST_HEAD(remove_list);
	struct list_head                  *node, *tmp;
	struct cryptocop_prio_job         *pj;

	DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid));

	/* Unlink the session from the global session list. */
	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess = cryptocop_sessions;
	while (sess && sess->sid != sid){
		psess = sess;
		sess = sess->next;
	}
	if (sess){
		if (psess){
			psess->next = sess->next;
		} else {
			cryptocop_sessions = sess->next;
		}
	}
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);

	if (!sess) return -EINVAL;

	/* Remove queued jobs. */
	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);

	for (i = 0; i < cryptocop_prio_no_prios; i++){
		if (!list_empty(&(cryptocop_job_queues[i].jobs))){
			list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
				pj = list_entry(node, struct cryptocop_prio_job, node);
				if (pj->oper->sid == sid) {
					list_move_tail(node, &remove_list);
				}
			}
		}
	}
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);

	/* Complete the cancelled jobs outside the queue lock. */
	list_for_each_safe(node, tmp, &remove_list) {
		list_del(node);
		pj = list_entry(node, struct cryptocop_prio_job, node);
		pj->oper->operation_status = -EAGAIN;  /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */
		DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop));
		pj->oper->cb(pj->oper, pj->oper->cb_data);
		delete_internal_operation(pj->iop);
		kfree(pj);
	}

	tc = sess->tfrm_ctx;
	/* Erase keying data.  memzero_explicit() guarantees the stores are
	 * not optimized away even though the memory is freed just below;
	 * a plain memset() on about-to-be-freed memory may be elided. */
	while (tc){
		DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid));
		memzero_explicit(tc->init.key, CRYPTOCOP_MAX_KEY_LENGTH);
		memzero_explicit(tc->dec_key, CRYPTOCOP_MAX_KEY_LENGTH);
		tc = tc->next;
	}
	kfree(sess->tfrm_ctx);
	kfree(sess);

	return 0;
}
1647 
/* Look up a session by id.  The global session list is walked under
 * cryptocop_sessions_lock; returns the matching session or NULL. */
static struct cryptocop_session *get_session(cryptocop_session_id sid)
{
	struct cryptocop_session  *s;
	unsigned long int         irq_flags;

	spin_lock_irqsave(&cryptocop_sessions_lock, irq_flags);
	for (s = cryptocop_sessions; s != NULL; s = s->next) {
		if (s->sid == sid)
			break;
	}
	spin_unlock_irqrestore(&cryptocop_sessions_lock, irq_flags);

	return s;
}
1662 
/* Find the transform context with transform id tid in session sess.
 * Returns the context or NULL if the session holds no such transform.
 */
static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid)
{
	struct cryptocop_transform_ctx *tc;

	/* Check the session pointer before it is dereferenced; the original
	 * asserted only after reading sess->tfrm_ctx. */
	assert(sess != NULL);
	tc = sess->tfrm_ctx;
	DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid));
	while (tc && tc->init.tid != tid){
		DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next));
		tc = tc->next;
	}
	DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc));
	return tc;
}
1676 
1677 
1678 
1679 /* The AES s-transform matrix (s-box). */
/* Forward AES substitution table (FIPS-197 S-box), indexed by the
 * input octet value; entries are given in decimal, 16 per row. */
static const u8 aes_sbox[256] = {
	99,  124, 119, 123, 242, 107, 111, 197, 48,  1,   103, 43,  254, 215, 171, 118,
	202, 130, 201, 125, 250, 89,  71,  240, 173, 212, 162, 175, 156, 164, 114, 192,
	183, 253, 147, 38,  54,  63,  247, 204, 52,  165, 229, 241, 113, 216, 49,  21,
	4,   199, 35,  195, 24,  150, 5,   154, 7,   18,  128, 226, 235, 39,  178, 117,
	9,   131, 44,  26,  27,  110, 90,  160, 82,  59,  214, 179, 41,  227, 47,  132,
	83,  209, 0,   237, 32,  252, 177, 91,  106, 203, 190, 57,  74,  76,  88,  207,
	208, 239, 170, 251, 67,  77,  51,  133, 69,  249, 2,   127, 80,  60,  159, 168,
	81,  163, 64,  143, 146, 157, 56,  245, 188, 182, 218, 33,  16,  255, 243, 210,
	205, 12,  19,  236, 95,  151, 68,  23,  196, 167, 126, 61,  100, 93,  25,  115,
	96,  129, 79,  220, 34,  42,  144, 136, 70,  238, 184, 20,  222, 94,  11,  219,
	224, 50,  58,  10,  73,  6,   36,  92,  194, 211, 172, 98,  145, 149, 228, 121,
	231, 200, 55,  109, 141, 213, 78,  169, 108, 86,  244, 234, 101, 122, 174, 8,
	186, 120, 37,  46,  28,  166, 180, 198, 232, 221, 116, 31,  75,  189, 139, 138,
	112, 62,  181, 102, 72,  3,   246, 14,  97,  53,  87,  185, 134, 193, 29,  158,
	225, 248, 152, 17,  105, 217, 142, 148, 155, 30,  135, 233, 206, 85,  40,  223,
	140, 161, 137, 13,  191, 230, 66,  104, 65,  153, 45,  15,  176, 84,  187, 22
};
1698 
1699 /* AES has a 32 bit word round constants for each round in the
1700  * key schedule.  round_constant[i] is really Rcon[i+1] in FIPS187.
1701  */
1702 static u32 round_constant[11] = {
1703 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
1704 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
1705 	0x1B000000, 0x36000000, 0x6C000000
1706 };
1707 
1708 /* Apply the s-box to each of the four occtets in w. */
aes_ks_subword(const u32 w)1709 static u32 aes_ks_subword(const u32 w)
1710 {
1711 	u8 bytes[4];
1712 
1713 	*(u32*)(&bytes[0]) = w;
1714 	bytes[0] = aes_sbox[bytes[0]];
1715 	bytes[1] = aes_sbox[bytes[1]];
1716 	bytes[2] = aes_sbox[bytes[2]];
1717 	bytes[3] = aes_sbox[bytes[3]];
1718 	return *(u32*)(&bytes[0]);
1719 }
1720 
1721 /* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
1722  * (Note that AES words are 32 bit long)
1723  *
1724  * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
1725  * word temp
1726  * i = 0
1727  * while (i < Nk) {
1728  *   w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
1729  *   i = i + 1
1730  * }
1731  * i = Nk
1732  *
1733  * while (i < (Nb * (Nr + 1))) {
1734  *   temp = w[i - 1]
1735  *   if ((i mod Nk) == 0) {
1736  *     temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
1737  *   }
1738  *   else if ((Nk > 6) && ((i mod Nk) == 4)) {
1739  *     temp = SubWord(temp)
1740  *   }
1741  *   w[i] = w[i - Nk] xor temp
1742  * }
1743  * RotWord(t) does a 8 bit cyclic shift left on a 32 bit word.
1744  * SubWord(t) applies the AES s-box individually to each octet
1745  * in a 32 bit word.
1746  *
1747  * For AES Nk can have the values 4, 6, and 8 (corresponding to
1748  * values for Nr of 10, 12, and 14).  Nb is always 4.
1749  *
1750  * To construct w[i], w[i - 1] and w[i - Nk] must be
1751  * available.  Consequently we must keep a state of the last Nk words
1752  * to be able to create the last round keys.
1753  */
/* Derive the AES decrypt key material from the encrypt key.
 *
 * Runs the forward (encrypt) key expansion, keeping only a ring buffer
 * of the last nk words, and writes out the expansion words generated
 * for i >= 4*nr -- the final 8 words, i.e. 32 bytes -- to dec_key in
 * big-endian byte order.  keylength is in bits and must be 128, 192 or
 * 256 (nk = 4/6/8 key words, nr = 10/12/14 rounds); any other value
 * panics.  NOTE(review): dec_key must therefore have room for at least
 * 32 bytes -- confirm against callers. */
static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned  char *key, unsigned int keylength)
{
	u32 temp;
	u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
	u8  w_last_ix;
	int i;
	u8  nr, nk;

	switch (keylength){
	case 128:
		nk = 4;
		nr = 10;
		break;
	case 192:
		nk = 6;
		nr = 12;
		break;
	case 256:
		nk = 8;
		nr = 14;
		break;
	default:
		panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
	};

	/* Need to do host byte order correction here since key is byte oriented and the
	 * kx algorithm is word (u32) oriented. */
	for (i = 0; i < nk; i+=1) {
		w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]);
	}

	/* Expansion proper: w[i] = w[i-nk] ^ f(w[i-1]), with f as in the
	 * KeyExpansion pseudo code above.  w_last_ix tracks where w[i-1]
	 * lives in the ring. */
	i = (int)nk;
	w_last_ix = i - 1;
	while (i < (4 * (nr + 2))) {
		temp = w_ring[w_last_ix];
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i/nk - 1];
		} else if ((nk > 6) && ((i % nk) == 4)) {
			temp = aes_ks_subword(temp);
		}
		w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */
		temp ^= w_ring[w_last_ix];
		w_ring[w_last_ix] = temp;

		/* We need the round keys for round Nr+1 and Nr+2 (round key
		 * Nr+2 is the round key beyond the last one used when
		 * encrypting).  Rounds are numbered starting from 0, Nr=10
		 * implies 11 rounds are used in encryption/decryption.
		 */
		if (i >= (4 * nr)) {
			/* Need to do host byte order correction here, the key
			 * is byte oriented. */
			*(u32*)dec_key = cpu_to_be32(temp);
			dec_key += 4;
		}
		++i;
	}
}
1815 
1816 
1817 /**** Job/operation management. ****/
1818 
/* Queue a kernel checksum operation at the cryptocop_prio_kernel_csum
 * priority.  Thin wrapper around cryptocop_job_queue_insert(). */
int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation);
}
1823 
/* Queue a kernel crypto operation at the cryptocop_prio_kernel
 * priority.  Thin wrapper around cryptocop_job_queue_insert(). */
int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation);
}
1828 
/* Queue a user-originated operation at the cryptocop_prio_user
 * priority.  Thin wrapper around cryptocop_job_queue_insert(). */
int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_user, operation);
}
1833 
/* Build a prio job for the operation, append it to the queue for the
 * given priority, and kick the scheduler so that a job is running.
 *
 * Returns 0 on success, -EINVAL if operation or its callback is NULL,
 * or the error from cryptocop_job_setup(). */
static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation)
{
	int                           ret;
	struct cryptocop_prio_job     *pj = NULL;
	unsigned long int             flags;

	DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation));

	/* A completion callback is mandatory: it is the only way the
	 * consumer learns the job finished (see cryptocop_do_tasklet). */
	if (!operation || !operation->cb){
		DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation));
		return -EINVAL;
	}

	if ((ret = cryptocop_job_setup(&pj, operation)) != 0){
		DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
		return ret;
	}
	assert(pj != NULL);

	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
	list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs);
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);

	/* Make sure a job is running */
	cryptocop_start_job();
	return 0;
}
1861 
/* Tasklet that delivers completion callbacks for finished jobs outside
 * hard-interrupt context; scheduled from dma_done_interrupt(). */
static void cryptocop_do_tasklet(unsigned long unused);
DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0);
1864 
cryptocop_do_tasklet(unsigned long unused)1865 static void cryptocop_do_tasklet(unsigned long unused)
1866 {
1867 	struct list_head             *node;
1868 	struct cryptocop_prio_job    *pj = NULL;
1869 	unsigned long                flags;
1870 
1871 	DEBUG(printk("cryptocop_do_tasklet: entering\n"));
1872 
1873 	do {
1874 		spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
1875 		if (!list_empty(&cryptocop_completed_jobs)){
1876 			node = cryptocop_completed_jobs.next;
1877 			list_del(node);
1878 			pj = list_entry(node, struct cryptocop_prio_job, node);
1879 		} else {
1880 			pj = NULL;
1881 		}
1882 		spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
1883 		if (pj) {
1884 			assert(pj->oper != NULL);
1885 
1886 			/* Notify consumer of operation completeness. */
1887 			DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
1888 
1889 			pj->oper->operation_status = 0; /* Job is completed. */
1890 			pj->oper->cb(pj->oper, pj->oper->cb_data);
1891 			delete_internal_operation(pj->iop);
1892 			kfree(pj);
1893 		}
1894 	} while (pj != NULL);
1895 
1896 	DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
1897 }
1898 
/* DMA-done interrupt handler.
 *
 * Acks the in-channel descriptor interrupt, detaches the currently
 * running job, opportunistically starts the next queued job, and then
 * delivers the finished job's callback: directly from interrupt
 * context when the operation set fast_callback, otherwise deferred to
 * cryptocop_tasklet via the completed-jobs list. */
static irqreturn_t
dma_done_interrupt(int irq, void *dev_id)
{
	struct cryptocop_prio_job *done_job;
	reg_dma_rw_ack_intr ack_intr = {
		.data = 1,
	};

	/* Ack the interrupt first so it is not re-raised for this event. */
	REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);

	DEBUG(printk("cryptocop DMA done\n"));

	spin_lock(&running_job_lock);
	if (cryptocop_running_job == NULL){
		/* Spurious interrupt: no job was marked as running. */
		printk("stream co-processor got interrupt when not busy\n");
		spin_unlock(&running_job_lock);
		return IRQ_HANDLED;
	}
	done_job = cryptocop_running_job;
	cryptocop_running_job = NULL;
	spin_unlock(&running_job_lock);

	/* Start processing a job.  If the process lock is contended,
	 * whoever holds it will start the next job, so just skip. */
	if (!spin_trylock(&cryptocop_process_lock)){
		DEBUG(printk("cryptocop irq handler, not starting a job\n"));
	} else {
		cryptocop_start_job();
		spin_unlock(&cryptocop_process_lock);
	}

	done_job->oper->operation_status = 0; /* Job is completed. */
	if (done_job->oper->fast_callback){
		/* This operation wants callback from interrupt. */
		done_job->oper->cb(done_job->oper, done_job->oper->cb_data);
		delete_internal_operation(done_job->iop);
		kfree(done_job);
	} else {
		/* Defer the callback to softirq context via the tasklet. */
		spin_lock(&cryptocop_completed_jobs_lock);
		list_add_tail(&(done_job->node), &cryptocop_completed_jobs);
		spin_unlock(&cryptocop_completed_jobs_lock);
		tasklet_schedule(&cryptocop_tasklet);
	}

	DEBUG(printk("cryptocop leave irq handler\n"));
	return IRQ_HANDLED;
}
1945 
1946 
1947 /* Setup interrupts and DMA channels. */
/* Set up interrupts and DMA channels.
 *
 * Requests the DMA-done IRQ (panics on failure) and both DMA channels,
 * then -- with local interrupts off -- resets and enables the strcop
 * unit, enables both DMAs with word size 4, and unmasks/acks the
 * in-channel descriptor interrupt.  Always returns 0. */
static int init_cryptocop(void)
{
	unsigned long          flags;
	reg_dma_rw_cfg         dma_cfg = {.en = 1};
	reg_dma_rw_intr_mask   intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */
	reg_dma_rw_ack_intr    ack_intr = {.data = 1,.in_eop = 1 };
	reg_strcop_rw_cfg      strcop_cfg = {
		.ipend = regk_strcop_little,
		.td1 = regk_strcop_e,
		.td2 = regk_strcop_d,
		.td3 = regk_strcop_e,
		.ignore_sync = 0,
		.en = 1
	};

	if (request_irq(DMA_IRQ, dma_done_interrupt, 0,
			"stream co-processor DMA", NULL))
		panic("request_irq stream co-processor irq dma9");

	/* DMA_PANIC_ON_ERROR: channel allocation failures panic, so the
	 * return values carry no information. */
	(void)crisv32_request_dma(OUT_DMA, "strcop", DMA_PANIC_ON_ERROR,
		0, dma_strp);
	(void)crisv32_request_dma(IN_DMA, "strcop", DMA_PANIC_ON_ERROR,
		0, dma_strp);

	local_irq_save(flags);

	/* Reset and enable the cryptocop: write the config once with
	 * en=0, then again with en=1. */
	strcop_cfg.en = 0;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
	strcop_cfg.en = 1;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);

	/* Enable DMAs. */
	REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg); /* input DMA */
	REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg); /* output DMA */

	/* Set up wordsize = 4 for DMAs. */
	DMA_WR_CMD(OUT_DMA_INST, regk_dma_set_w_size4);
	DMA_WR_CMD(IN_DMA_INST, regk_dma_set_w_size4);

	/* Enable interrupts. */
	REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in);

	/* Clear intr ack. */
	REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);

	local_irq_restore(flags);

	return 0;
}
1998 
1999 /* Free used cryptocop hw resources (interrupt and DMA channels). */
/* Free used cryptocop hw resources (interrupt and DMA channels).
 * Reverse of init_cryptocop(): with local interrupts off, ack any
 * pending interrupt, disable both DMAs and mask the in-channel
 * interrupt, then release the IRQ and DMA channels. */
static void release_cryptocop(void)
{
	unsigned long          flags;
	reg_dma_rw_cfg         dma_cfg = {.en = 0};
	reg_dma_rw_intr_mask   intr_mask_in = {0};
	reg_dma_rw_ack_intr    ack_intr = {.data = 1,.in_eop = 1 };

	local_irq_save(flags);

	/* Clear intr ack. */
	REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);

	/* Disable DMAs. */
	REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg); /* input DMA */
	REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg); /* output DMA */

	/* Disable interrupts. */
	REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in);

	local_irq_restore(flags);

	free_irq(DMA_IRQ, NULL);

	(void)crisv32_free_dma(OUT_DMA);
	(void)crisv32_free_dma(IN_DMA);
}
2026 
2027 
2028 /* Init job queue. */
cryptocop_job_queue_init(void)2029 static int cryptocop_job_queue_init(void)
2030 {
2031 	int i;
2032 
2033 	INIT_LIST_HEAD(&cryptocop_completed_jobs);
2034 
2035 	for (i = 0; i < cryptocop_prio_no_prios; i++){
2036 		cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i;
2037 		INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs);
2038 	}
2039 	return 0;
2040 }
2041 
2042 
/* Tear down the job machinery at module close.
 *
 * Three phases: (1) flush every pending queue under the process lock,
 * (2) abort any running job -- stopping both DMAs and disabling the
 * co-processor -- under the running-job lock, (3) drain the completed
 * list.  Every removed job has its callback invoked with
 * operation_status = -EINTR before its resources are freed. */
static void cryptocop_job_queue_close(void)
{
	struct list_head               *node, *tmp;
	struct cryptocop_prio_job      *pj = NULL;
	unsigned long int              process_flags, flags;
	int                            i;

	/* FIXME: This is as yet untested code. */

	/* Stop strcop from getting an operation to process while we are closing the
	   module. */
	spin_lock_irqsave(&cryptocop_process_lock, process_flags);

	/* Empty the job queue. */
	for (i = 0; i < cryptocop_prio_no_prios; i++){
		if (!list_empty(&(cryptocop_job_queues[i].jobs))){
			list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
				pj = list_entry(node, struct cryptocop_prio_job, node);
				list_del(node);

				/* Call callback to notify consumer of job removal. */
				DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
				pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
				pj->oper->cb(pj->oper, pj->oper->cb_data);

				delete_internal_operation(pj->iop);
				kfree(pj);
			}
		}
	}
	spin_unlock_irqrestore(&cryptocop_process_lock, process_flags);

	/* Remove the running job, if any. */
	spin_lock_irqsave(&running_job_lock, flags);
	if (cryptocop_running_job){
		reg_strcop_rw_cfg rw_cfg;
		reg_dma_rw_cfg    dma_out_cfg, dma_in_cfg;

		/* Stop DMA. */
		dma_out_cfg = REG_RD(dma, OUT_DMA_INST, rw_cfg);
		dma_out_cfg.en = regk_dma_no;
		REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_out_cfg);

		dma_in_cfg = REG_RD(dma, IN_DMA_INST, rw_cfg);
		dma_in_cfg.en = regk_dma_no;
		REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);

		/* Disable the cryptocop. */
		rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
		rw_cfg.en = 0;
		REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);

		pj = cryptocop_running_job;
		cryptocop_running_job = NULL;

		/* Call callback to notify consumer of job removal. */
		DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
		pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
		pj->oper->cb(pj->oper, pj->oper->cb_data);

		delete_internal_operation(pj->iop);
		kfree(pj);
	}
	spin_unlock_irqrestore(&running_job_lock, flags);

	/* Remove completed jobs, if any. */
	spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);

	list_for_each_safe(node, tmp, &cryptocop_completed_jobs) {
		pj = list_entry(node, struct cryptocop_prio_job, node);
		list_del(node);
		/* Call callback to notify consumer of job removal. */
		DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
		pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
		pj->oper->cb(pj->oper, pj->oper->cb_data);

		delete_internal_operation(pj->iop);
		kfree(pj);
	}
	spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
}
2124 
2125 
/* Pull the highest-priority pending job and start it on the hardware.
 *
 * Does nothing if a job is already running or all queues are empty.
 * Configures the strcop 3DES and checksum modes from the job's
 * internal operation, then launches the input and output DMA
 * contexts.  Lock order: running_job_lock is taken first and held
 * across the whole start, cryptocop_job_queue_lock nested inside it
 * only while a job is dequeued. */
static void cryptocop_start_job(void)
{
	int                          i;
	struct cryptocop_prio_job    *pj;
	unsigned long int            flags;
	unsigned long int            running_job_flags;
	reg_strcop_rw_cfg            rw_cfg = {.en = 1, .ignore_sync = 0};

	DEBUG(printk("cryptocop_start_job: entering\n"));

	spin_lock_irqsave(&running_job_lock, running_job_flags);
	if (cryptocop_running_job != NULL){
		/* Already running. */
		DEBUG(printk("cryptocop_start_job: already running, exit\n"));
		spin_unlock_irqrestore(&running_job_lock, running_job_flags);
		return;
	}
	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);

	/* Check the queues in priority order. */
	for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++);
	if (i == cryptocop_prio_no_prios) {
		spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
		spin_unlock_irqrestore(&running_job_lock, running_job_flags);
		DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
		return; /* No jobs to run */
	}
	DEBUG(printk("starting job for prio %d\n", i));

	/* TODO: Do not starve lower priority jobs.  Let in a lower
	 * prio job for every N-th processed higher prio job or some
	 * other scheduling policy.  This could reasonably be
	 * tweakable since the optimal balance would depend on the
	 * type of load on the system. */

	/* Pull the DMA lists from the job and start the DMA client. */
	pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node);
	list_del(&pj->node);
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
	cryptocop_running_job = pj;

	/* Set config register (3DES and CSUM modes). */
	switch (pj->iop->tdes_mode){
	case cryptocop_3des_eee:
		rw_cfg.td1 = regk_strcop_e;
		rw_cfg.td2 = regk_strcop_e;
		rw_cfg.td3 = regk_strcop_e;
		break;
	case cryptocop_3des_eed:
		rw_cfg.td1 = regk_strcop_e;
		rw_cfg.td2 = regk_strcop_e;
		rw_cfg.td3 = regk_strcop_d;
		break;
	case cryptocop_3des_ede:
		rw_cfg.td1 = regk_strcop_e;
		rw_cfg.td2 = regk_strcop_d;
		rw_cfg.td3 = regk_strcop_e;
		break;
	case cryptocop_3des_edd:
		rw_cfg.td1 = regk_strcop_e;
		rw_cfg.td2 = regk_strcop_d;
		rw_cfg.td3 = regk_strcop_d;
		break;
	case cryptocop_3des_dee:
		rw_cfg.td1 = regk_strcop_d;
		rw_cfg.td2 = regk_strcop_e;
		rw_cfg.td3 = regk_strcop_e;
		break;
	case cryptocop_3des_ded:
		rw_cfg.td1 = regk_strcop_d;
		rw_cfg.td2 = regk_strcop_e;
		rw_cfg.td3 = regk_strcop_d;
		break;
	case cryptocop_3des_dde:
		rw_cfg.td1 = regk_strcop_d;
		rw_cfg.td2 = regk_strcop_d;
		rw_cfg.td3 = regk_strcop_e;
		break;
	case cryptocop_3des_ddd:
		rw_cfg.td1 = regk_strcop_d;
		rw_cfg.td2 = regk_strcop_d;
		rw_cfg.td3 = regk_strcop_d;
		break;
	default:
		DEBUG(printk("cryptocop_setup_dma_list: bad 3DES mode\n"));
	}
	switch (pj->iop->csum_mode){
	case cryptocop_csum_le:
		rw_cfg.ipend = regk_strcop_little;
		break;
	case cryptocop_csum_be:
		rw_cfg.ipend = regk_strcop_big;
		break;
	default:
		DEBUG(printk("cryptocop_setup_dma_list: bad checksum mode\n"));
	}
	REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);

	DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
		     "ctx_in: 0x%p, phys: 0x%p\n"
		     "ctx_out: 0x%p, phys: 0x%p\n",
		     pj,
		     &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in),
		     &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out)));

	/* Start input DMA. */
	flush_dma_context(&pj->iop->ctx_in);
	DMA_START_CONTEXT(IN_DMA_INST, virt_to_phys(&pj->iop->ctx_in));

	/* Start output DMA. */
	DMA_START_CONTEXT(OUT_DMA_INST, virt_to_phys(&pj->iop->ctx_out));

	spin_unlock_irqrestore(&running_job_lock, running_job_flags);
	DEBUG(printk("cryptocop_start_job: exiting\n"));
}
2241 
2242 
/* Allocate a prio job for the operation and build its internal
 * operation (DMA descriptor lists and contexts).
 *
 * With use_dmalists set, the caller supplies ready-made in/out
 * descriptor lists and the iop is assembled here; otherwise
 * cryptocop_setup_dma_list() builds it.  Uses GFP_ATOMIC when called
 * from interrupt context (operation->in_interrupt).
 *
 * On success *pj holds the new job and 0 is returned; on failure *pj
 * is freed and -ENOMEM/-EINVAL or the setup error is returned. */
static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation)
{
	int  err;
	int  alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL;
	void *iop_alloc_ptr = NULL;

	*pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag);
	if (!*pj) return -ENOMEM;

	DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation));

	(*pj)->oper = operation;
	DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n",  (*pj)->oper->cb, (*pj)->oper->cb_data));

	if (operation->use_dmalists) {
		DEBUG(print_user_dma_lists(&operation->list_op));
		if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){
			DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
			kfree(*pj);
			return -EINVAL;
		}
		iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
		if (!iop_alloc_ptr) {
			DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
			kfree(*pj);
			return -ENOMEM;
		}
		/* Place the iop inside the padded allocation so that its
		 * ctx_out member sits on a 32-byte boundary (mask ~0x1F);
		 * presumably a DMA context alignment requirement -- confirm
		 * against the DMA hardware docs.  alloc_ptr keeps the real
		 * allocation start for the eventual kfree. */
		(*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
		DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation)));
		(*pj)->iop->alloc_ptr = iop_alloc_ptr;
		(*pj)->iop->sid = operation->sid;
		(*pj)->iop->cdesc_out = NULL;
		(*pj)->iop->cdesc_in = NULL;
		(*pj)->iop->tdes_mode = operation->list_op.tdes_mode;
		(*pj)->iop->csum_mode = operation->list_op.csum_mode;
		(*pj)->iop->ddesc_out = operation->list_op.outlist;
		(*pj)->iop->ddesc_in = operation->list_op.inlist;

		/* Setup DMA contexts. */
		(*pj)->iop->ctx_out.next = NULL;
		(*pj)->iop->ctx_out.eol = 1;
		(*pj)->iop->ctx_out.saved_data = operation->list_op.outlist;
		(*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf;

		(*pj)->iop->ctx_in.next = NULL;
		(*pj)->iop->ctx_in.eol = 1;
		(*pj)->iop->ctx_in.saved_data = operation->list_op.inlist;
		(*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf;
	} else {
		if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) {
			DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err));
			kfree(*pj);
			return err;
		}
	}
	DEBUG(print_dma_descriptors((*pj)->iop));

	DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));

	return 0;
}
2304 
cryptocop_open(struct inode * inode,struct file * filp)2305 static int cryptocop_open(struct inode *inode, struct file *filp)
2306 {
2307 	int p = iminor(inode);
2308 
2309 	if (p != CRYPTOCOP_MINOR) return -EINVAL;
2310 
2311 	filp->private_data = NULL;
2312 	return 0;
2313 }
2314 
2315 
cryptocop_release(struct inode * inode,struct file * filp)2316 static int cryptocop_release(struct inode *inode, struct file *filp)
2317 {
2318 	struct cryptocop_private *dev = filp->private_data;
2319 	struct cryptocop_private *dev_next;
2320 
2321 	while (dev){
2322 		dev_next = dev->next;
2323 		if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) {
2324 			(void)cryptocop_free_session(dev->sid);
2325 		}
2326 		kfree(dev);
2327 		dev = dev_next;
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 
/* ioctl: close the session named in the user's strcop_session_op.
 *
 * Copies the session op from user space, unlinks the matching record
 * from this file's private session list, and frees the driver-side
 * session.  Returns 0 on success, -EFAULT on user-copy or free
 * failure, -EINVAL if the session id is not on this file. */
static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp,
					 unsigned int cmd, unsigned long arg)
{
	struct cryptocop_private  *dev = filp->private_data;
	struct cryptocop_private  *prev_dev = NULL;
	struct strcop_session_op  *sess_op = (struct strcop_session_op *)arg;
	struct strcop_session_op  sop;
	int                       err;

	DEBUG(printk("cryptocop_ioctl_close_session\n"));

	if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op)))
		return -EFAULT;
	/* copy_from_user returns the number of bytes NOT copied; any
	 * non-zero value means the copy failed. */
	err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
	if (err) return -EFAULT;

	/* Find the record for sop.ses_id, tracking its predecessor so it
	 * can be unlinked. */
	while (dev && (dev->sid != sop.ses_id)) {
		prev_dev = dev;
		dev = dev->next;
	}
	if (dev){
		if (prev_dev){
			prev_dev->next = dev->next;
		} else {
			filp->private_data = dev->next;
		}
		err = cryptocop_free_session(dev->sid);
		if (err) return -EFAULT;
	} else {
		DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id));
		return -EINVAL;
	}
	return 0;
}
2368 
2369 
ioctl_process_job_callback(struct cryptocop_operation * op,void * cb_data)2370 static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data)
2371 {
2372 	struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data;
2373 
2374 	DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data));
2375 
2376 	jc->processed = 1;
2377 	wake_up(&cryptocop_ioc_process_wq);
2378 }
2379 
2380 
2381 #define CRYPTOCOP_IOCTL_CIPHER_TID  (1)
2382 #define CRYPTOCOP_IOCTL_DIGEST_TID  (2)
2383 #define CRYPTOCOP_IOCTL_CSUM_TID    (3)
2384 
first_cfg_change_ix(struct strcop_crypto_op * crp_op)2385 static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op)
2386 {
2387 	size_t ch_ix = 0;
2388 
2389 	if (crp_op->do_cipher) ch_ix = crp_op->cipher_start;
2390 	if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start;
2391 	if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start;
2392 
2393 	DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix));
2394 	return ch_ix;
2395 }
2396 
2397 
/* Return the next index after ix where the set of active transforms
 * changes: for each enabled transform whose span extends past ix, the
 * candidate is either its start (if still ahead) or its end, and the
 * smallest candidate wins.  If no transform extends past ix, ix itself
 * is returned (no further changes).
 *
 * Fix: the DEBUG printk used %d for size_t arguments, which is
 * undefined behaviour per the printk format rules; use %zu. */
static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix)
{
	size_t ch_ix = INT_MAX;
	size_t tmp_ix = 0;

	if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){
		if (crp_op->cipher_start > ix) {
			ch_ix = crp_op->cipher_start;
		} else {
			ch_ix = crp_op->cipher_start + crp_op->cipher_len;
		}
	}
	if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){
		if (crp_op->digest_start > ix) {
			tmp_ix = crp_op->digest_start;
		} else {
			tmp_ix = crp_op->digest_start + crp_op->digest_len;
		}
		if (tmp_ix < ch_ix) ch_ix = tmp_ix;
	}
	if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){
		if (crp_op->csum_start > ix) {
			tmp_ix = crp_op->csum_start;
		} else {
			tmp_ix = crp_op->csum_start + crp_op->csum_len;
		}
		if (tmp_ix < ch_ix) ch_ix = tmp_ix;
	}
	if (ch_ix == INT_MAX) ch_ix = ix;
	DEBUG(printk("next_cfg_change_ix prev ix=%zu, next ix=%zu\n", ix, ch_ix));
	return ch_ix;
}
2430 
2431 
/* Map map_length bytes from the pages starting on *pageix and *pageoffset to iovecs starting on *iovix.
 * Return -1 for ok, 0 for fail.
 *
 * NOTE: the return convention is inverted from the usual kernel style:
 * -1 (all bits set, i.e. "true") means success, 0 means the iovec or
 * page arrays were exhausted before map_length bytes were mapped.
 * On return *iovix, *pageix and *pageoffset have been advanced past
 * the mapped region, so consecutive calls continue where the previous
 * one stopped. */
static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length )
{
	int tmplen;

	assert(iov != NULL);
	assert(iovix != NULL);
	assert(pages != NULL);
	assert(pageix != NULL);
	assert(pageoffset != NULL);

	DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));

	while (map_length > 0){
		DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
		if (*iovix >= iovlen){
			DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen));
			return 0;
		}
		if (*pageix >= nopages){
			DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages));
			return 0;
		}
		iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset;
		/* Take as much of the current page as fits (or as needed). */
		tmplen = PAGE_SIZE - *pageoffset;
		if (tmplen < map_length){
			(*pageoffset) = 0;
			(*pageix)++;
		} else {
			tmplen = map_length;
			(*pageoffset) += map_length;
		}
		DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix));
		iov[*iovix].iov_len = tmplen;
		map_length -= tmplen;
		(*iovix)++;
	}
	DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix));
	return -1;
}
2473 
2474 
2475 
cryptocop_ioctl_process(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)2476 static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2477 {
2478 	int                             i;
2479 	struct cryptocop_private        *dev = filp->private_data;
2480 	struct strcop_crypto_op         *crp_oper = (struct strcop_crypto_op *)arg;
2481 	struct strcop_crypto_op         oper = {0};
2482 	int                             err = 0;
2483 	struct cryptocop_operation      *cop = NULL;
2484 
2485 	struct ioctl_job_cb_ctx         *jc = NULL;
2486 
2487 	struct page                     **inpages = NULL;
2488 	struct page                     **outpages = NULL;
2489 	int                             noinpages = 0;
2490 	int                             nooutpages = 0;
2491 
2492 	struct cryptocop_desc           descs[5]; /* Max 5 descriptors are needed, there are three transforms that
2493 						   * can get connected/disconnected on different places in the indata. */
2494 	struct cryptocop_desc_cfg       dcfgs[5*3];
2495 	int                             desc_ix = 0;
2496 	int                             dcfg_ix = 0;
2497 	struct cryptocop_tfrm_cfg       ciph_tcfg = {0};
2498 	struct cryptocop_tfrm_cfg       digest_tcfg = {0};
2499 	struct cryptocop_tfrm_cfg       csum_tcfg = {0};
2500 
2501 	unsigned char                   *digest_result = NULL;
2502 	int                             digest_length = 0;
2503 	int                             cblocklen = 0;
2504 	unsigned char                   csum_result[CSUM_BLOCK_LENGTH];
2505 	struct cryptocop_session        *sess;
2506 
2507 	int    iovlen = 0;
2508 	int    iovix = 0;
2509 	int    pageix = 0;
2510 	int    pageoffset = 0;
2511 
2512 	size_t prev_ix = 0;
2513 	size_t next_ix;
2514 
2515 	int    cipher_active, digest_active, csum_active;
2516 	int    end_digest, end_csum;
2517 	int    digest_done = 0;
2518 	int    cipher_done = 0;
2519 	int    csum_done = 0;
2520 
2521 	DEBUG(printk("cryptocop_ioctl_process\n"));
2522 
2523 	if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){
2524 		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2525 		return -EFAULT;
2526 	}
2527 	if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) {
2528 		DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2529 		return -EFAULT;
2530 	}
2531 	DEBUG(print_strcop_crypto_op(&oper));
2532 
2533 	while (dev && dev->sid != oper.ses_id) dev = dev->next;
2534 	if (!dev){
2535 		DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id));
2536 		return -EINVAL;
2537 	}
2538 
2539 	/* Check buffers. */
2540 	if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){
2541 		DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2542 		return -EINVAL;
2543 	}
2544 
2545 	if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){
2546 		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2547 		return -EFAULT;
2548 	}
2549 	if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){
2550 		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2551 		return -EFAULT;
2552 	}
2553 
2554 	cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL);
2555 	if (!cop) {
2556 		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2557 		return -ENOMEM;
2558 	}
2559 	jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL);
2560 	if (!jc) {
2561 		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2562 		err = -ENOMEM;
2563 		goto error_cleanup;
2564 	}
2565 	jc->processed = 0;
2566 
2567 	cop->cb_data = jc;
2568 	cop->cb = ioctl_process_job_callback;
2569 	cop->operation_status = 0;
2570 	cop->use_dmalists = 0;
2571 	cop->in_interrupt = 0;
2572 	cop->fast_callback = 0;
2573 	cop->tfrm_op.tfrm_cfg = NULL;
2574 	cop->tfrm_op.desc = NULL;
2575 	cop->tfrm_op.indata = NULL;
2576 	cop->tfrm_op.incount = 0;
2577 	cop->tfrm_op.inlen = 0;
2578 	cop->tfrm_op.outdata = NULL;
2579 	cop->tfrm_op.outcount = 0;
2580 	cop->tfrm_op.outlen = 0;
2581 
2582 	sess = get_session(oper.ses_id);
2583 	if (!sess){
2584 		DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2585 		kfree(cop);
2586 		kfree(jc);
2587 		return -EINVAL;
2588 	}
2589 
2590 	if (oper.do_cipher) {
2591 		unsigned int                    cipher_outlen = 0;
2592 		struct cryptocop_transform_ctx  *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID);
2593 		if (!tc) {
2594 			DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2595 			err = -EINVAL;
2596 			goto error_cleanup;
2597 		}
2598 		ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2599 		ciph_tcfg.inject_ix = 0;
2600 		ciph_tcfg.flags = 0;
2601 		if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){
2602 			DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2603 			kfree(cop);
2604 			kfree(jc);
2605 			return -EINVAL;
2606 		}
2607 		cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH;
2608 		if (oper.cipher_len % cblocklen) {
2609 			kfree(cop);
2610 			kfree(jc);
2611 			DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2612 			return -EINVAL;
2613 		}
2614 		cipher_outlen = oper.cipher_len;
2615 		if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){
2616 			if (oper.cipher_explicit) {
2617 				ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV;
2618 				memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen);
2619 			} else {
2620 				cipher_outlen = oper.cipher_len - cblocklen;
2621 			}
2622 		} else {
2623 			if (oper.cipher_explicit){
2624 				kfree(cop);
2625 				kfree(jc);
2626 				DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2627 				return -EINVAL;
2628 			}
2629 		}
2630 		if (oper.cipher_outlen != cipher_outlen) {
2631 			kfree(cop);
2632 			kfree(jc);
2633 			DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen));
2634 			return -EINVAL;
2635 		}
2636 
2637 		if (oper.decrypt){
2638 			ciph_tcfg.flags |= CRYPTOCOP_DECRYPT;
2639 		} else {
2640 			ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT;
2641 		}
2642 		ciph_tcfg.next = cop->tfrm_op.tfrm_cfg;
2643 		cop->tfrm_op.tfrm_cfg = &ciph_tcfg;
2644 	}
2645 	if (oper.do_digest){
2646 		struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID);
2647 		if (!tc) {
2648 			DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2649 			err = -EINVAL;
2650 			goto error_cleanup;
2651 		}
2652 		digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20;
2653 		digest_result = kmalloc(digest_length, GFP_KERNEL);
2654 		if (!digest_result) {
2655 			DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2656 			err = -EINVAL;
2657 			goto error_cleanup;
2658 		}
2659 		DEBUG(memset(digest_result, 0xff, digest_length));
2660 
2661 		digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2662 		digest_tcfg.inject_ix = 0;
2663 		ciph_tcfg.inject_ix += digest_length;
2664 		if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){
2665 			DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2666 			err = -EINVAL;
2667 			goto error_cleanup;
2668 		}
2669 
2670 		digest_tcfg.next = cop->tfrm_op.tfrm_cfg;
2671 		cop->tfrm_op.tfrm_cfg = &digest_tcfg;
2672 	}
2673 	if (oper.do_csum){
2674 		csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID;
2675 		csum_tcfg.inject_ix = digest_length;
2676 		ciph_tcfg.inject_ix += 2;
2677 
2678 		if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){
2679 			DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2680 			kfree(cop);
2681 			kfree(jc);
2682 			return -EINVAL;
2683 		}
2684 
2685 		csum_tcfg.next = cop->tfrm_op.tfrm_cfg;
2686 		cop->tfrm_op.tfrm_cfg = &csum_tcfg;
2687 	}
2688 
2689 	prev_ix = first_cfg_change_ix(&oper);
2690 	if (prev_ix > oper.inlen) {
2691 		DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2692 		nooutpages = noinpages = 0;
2693 		err = -EINVAL;
2694 		goto error_cleanup;
2695 	}
2696 	DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen));
2697 
2698 	/* Map user pages for in and out data of the operation. */
2699 	noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT;
2700 	DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages));
2701 	inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL);
2702 	if (!inpages){
2703 		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2704 		nooutpages = noinpages = 0;
2705 		err = -ENOMEM;
2706 		goto error_cleanup;
2707 	}
2708 	if (oper.do_cipher){
2709 		nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT;
2710 		DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages));
2711 		outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL);
2712 		if (!outpages){
2713 			DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2714 			nooutpages = noinpages = 0;
2715 			err = -ENOMEM;
2716 			goto error_cleanup;
2717 		}
2718 	}
2719 
2720 	/* Acquire the mm page semaphore. */
2721 	down_read(&current->mm->mmap_sem);
2722 
2723 	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
2724 			     noinpages,
2725 			     0,  /* read access only for in data */
2726 			     inpages,
2727 			     NULL);
2728 
2729 	if (err < 0) {
2730 		up_read(&current->mm->mmap_sem);
2731 		nooutpages = noinpages = 0;
2732 		DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2733 		goto error_cleanup;
2734 	}
2735 	noinpages = err;
2736 	if (oper.do_cipher){
2737 		err = get_user_pages((unsigned long int)oper.cipher_outdata,
2738 				     nooutpages,
2739 				     FOLL_WRITE, /* write access for out data */
2740 				     outpages,
2741 				     NULL);
2742 		up_read(&current->mm->mmap_sem);
2743 		if (err < 0) {
2744 			nooutpages = 0;
2745 			DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2746 			goto error_cleanup;
2747 		}
2748 		nooutpages = err;
2749 	} else {
2750 		up_read(&current->mm->mmap_sem);
2751 	}
2752 
2753 	/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2754 	 * csum output and splits when units are (dis-)connected. */
2755 	cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL);
2756 	cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL);
2757 	if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) {
2758 		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2759 		err = -ENOMEM;
2760 		goto error_cleanup;
2761 	}
2762 
2763 	cop->tfrm_op.inlen = oper.inlen - prev_ix;
2764 	cop->tfrm_op.outlen = 0;
2765 	if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen;
2766 	if (oper.do_digest) cop->tfrm_op.outlen += digest_length;
2767 	if (oper.do_csum) cop->tfrm_op.outlen += 2;
2768 
2769 	/* Setup the in iovecs. */
2770 	cop->tfrm_op.incount = noinpages;
2771 	if (noinpages > 1){
2772 		size_t tmplen = cop->tfrm_op.inlen;
2773 
2774 		cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2775 		cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2776 		tmplen -= cop->tfrm_op.indata[0].iov_len;
2777 		for (i = 1; i<noinpages; i++){
2778 			cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE;
2779 			cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]);
2780 			tmplen -= PAGE_SIZE;
2781 		}
2782 	} else {
2783 		cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix;
2784 		cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2785 	}
2786 
2787 	iovlen = nooutpages + 6;
2788 	pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0;
2789 
2790 	next_ix = next_cfg_change_ix(&oper, prev_ix);
2791 	if (prev_ix == next_ix){
2792 		DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2793 		err = -EINVAL;  /* This should be impossible barring bugs. */
2794 		goto error_cleanup;
2795 	}
2796 	while (prev_ix != next_ix){
2797 		end_digest = end_csum = cipher_active = digest_active = csum_active = 0;
2798 		descs[desc_ix].cfg = NULL;
2799 		descs[desc_ix].length = next_ix - prev_ix;
2800 
2801 		if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) {
2802 			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2803 			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2804 			cipher_active = 1;
2805 
2806 			if (next_ix == (oper.cipher_start + oper.cipher_len)){
2807 				cipher_done = 1;
2808 				dcfgs[dcfg_ix].last = 1;
2809 			} else {
2810 				dcfgs[dcfg_ix].last = 0;
2811 			}
2812 			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2813 			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2814 			++dcfg_ix;
2815 		}
2816 		if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) {
2817 			digest_active = 1;
2818 			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2819 			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2820 			if (next_ix == (oper.digest_start + oper.digest_len)){
2821 				assert(!digest_done);
2822 				digest_done = 1;
2823 				dcfgs[dcfg_ix].last = 1;
2824 			} else {
2825 				dcfgs[dcfg_ix].last = 0;
2826 			}
2827 			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2828 			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2829 			++dcfg_ix;
2830 		}
2831 		if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){
2832 			csum_active = 1;
2833 			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID;
2834 			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2835 			if (next_ix == (oper.csum_start + oper.csum_len)){
2836 				csum_done = 1;
2837 				dcfgs[dcfg_ix].last = 1;
2838 			} else {
2839 				dcfgs[dcfg_ix].last = 0;
2840 			}
2841 			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2842 			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2843 			++dcfg_ix;
2844 		}
2845 		if (!descs[desc_ix].cfg){
2846 			DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix));
2847 			err = -EINVAL;
2848 			goto error_cleanup;
2849 		}
2850 		descs[desc_ix].next = &(descs[desc_ix]) + 1;
2851 		++desc_ix;
2852 		prev_ix = next_ix;
2853 		next_ix = next_cfg_change_ix(&oper, prev_ix);
2854 	}
2855 	if (desc_ix > 0){
2856 		descs[desc_ix-1].next = NULL;
2857 	} else {
2858 		descs[0].next = NULL;
2859 	}
2860 	if (oper.do_digest) {
2861 		DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix));
2862 		/* Add outdata iovec, length == <length of type of digest> */
2863 		cop->tfrm_op.outdata[iovix].iov_base = digest_result;
2864 		cop->tfrm_op.outdata[iovix].iov_len = digest_length;
2865 		++iovix;
2866 	}
2867 	if (oper.do_csum) {
2868 		/* Add outdata iovec, length == 2, the length of csum. */
2869 		DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix));
2870 		/* Add outdata iovec, length == <length of type of digest> */
2871 		cop->tfrm_op.outdata[iovix].iov_base = csum_result;
2872 		cop->tfrm_op.outdata[iovix].iov_len = 2;
2873 		++iovix;
2874 	}
2875 	if (oper.do_cipher) {
2876 		if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){
2877 			DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2878 			err = -ENOSYS; /* This should be impossible barring bugs. */
2879 			goto error_cleanup;
2880 		}
2881 	}
2882 	DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix));
2883 	cop->tfrm_op.outcount = iovix;
2884 	assert(iovix <= (nooutpages + 6));
2885 
2886 	cop->sid = oper.ses_id;
2887 	cop->tfrm_op.desc = &descs[0];
2888 
2889 	DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data));
2890 
2891 	if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) {
2892 		DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err));
2893 		err = -EINVAL;
2894 		goto error_cleanup;
2895 	}
2896 
2897 	DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2898 
2899 	wait_event(cryptocop_ioc_process_wq, (jc->processed != 0));
2900 	DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2901         if (!jc->processed){
2902 		printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n");
2903 		err = -EIO;
2904 		goto error_cleanup;
2905 	}
2906 
2907 	/* Job process done.  Cipher output should already be correct in job so no post processing of outdata. */
2908 	DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status));
2909 	if (cop->operation_status == 0){
2910 		if (oper.do_digest){
2911 			DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length));
2912 			err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length);
2913 			if (0 != err){
2914 				DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err));
2915 				err = -EFAULT;
2916 				goto error_cleanup;
2917 			}
2918 		}
2919 		if (oper.do_csum){
2920 			DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2921 			err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2);
2922 			if (0 != err){
2923 				DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err));
2924 				err = -EFAULT;
2925 				goto error_cleanup;
2926 			}
2927 		}
2928 		err = 0;
2929 	} else {
2930 		DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status));
2931 		err = cop->operation_status;
2932 	}
2933 
2934  error_cleanup:
2935 	/* Release page caches. */
2936 	for (i = 0; i < noinpages; i++){
2937 		put_page(inpages[i]);
2938 	}
2939 	for (i = 0; i < nooutpages; i++){
2940 		int spdl_err;
2941 		/* Mark output pages dirty. */
2942 		spdl_err = set_page_dirty_lock(outpages[i]);
2943 		DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2944 	}
2945 	for (i = 0; i < nooutpages; i++){
2946 		put_page(outpages[i]);
2947 	}
2948 
2949 	kfree(digest_result);
2950 	kfree(inpages);
2951 	kfree(outpages);
2952 	if (cop){
2953 		kfree(cop->tfrm_op.indata);
2954 		kfree(cop->tfrm_op.outdata);
2955 		kfree(cop);
2956 	}
2957 	kfree(jc);
2958 
2959 	DEBUG(print_lock_status());
2960 
2961 	return err;
2962 }
2963 
2964 
cryptocop_ioctl_create_session(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)2965 static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2966 {
2967 	cryptocop_session_id             sid;
2968 	int                              err;
2969 	struct cryptocop_private         *dev;
2970 	struct strcop_session_op         *sess_op = (struct strcop_session_op *)arg;
2971 	struct strcop_session_op         sop;
2972 	struct cryptocop_transform_init  *tis = NULL;
2973 	struct cryptocop_transform_init  ti_cipher = {0};
2974 	struct cryptocop_transform_init  ti_digest = {0};
2975 	struct cryptocop_transform_init  ti_csum = {0};
2976 
2977 	if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op)))
2978 		return -EFAULT;
2979 	err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2980 	if (err) return -EFAULT;
2981 	if (sop.cipher != cryptocop_cipher_none) {
2982 		if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT;
2983 	}
2984 	DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2985 
2986 	DEBUG(printk("\tcipher:%d\n"
2987 		     "\tcipher_mode:%d\n"
2988 		     "\tdigest:%d\n"
2989 		     "\tcsum:%d\n",
2990 		     (int)sop.cipher,
2991 		     (int)sop.cmode,
2992 		     (int)sop.digest,
2993 		     (int)sop.csum));
2994 
2995 	if (sop.cipher != cryptocop_cipher_none){
2996 		/* Init the cipher. */
2997 		switch (sop.cipher){
2998 		case cryptocop_cipher_des:
2999 			ti_cipher.alg = cryptocop_alg_des;
3000 			break;
3001 		case cryptocop_cipher_3des:
3002 			ti_cipher.alg = cryptocop_alg_3des;
3003 			break;
3004 		case cryptocop_cipher_aes:
3005 			ti_cipher.alg = cryptocop_alg_aes;
3006 			break;
3007 		default:
3008 			DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher));
3009 			return -EINVAL;
3010 		};
3011 		DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg));
3012 		copy_from_user(ti_cipher.key, sop.key, sop.keylen/8);
3013 		ti_cipher.keylen = sop.keylen;
3014 		switch (sop.cmode){
3015 		case cryptocop_cipher_mode_cbc:
3016 		case cryptocop_cipher_mode_ecb:
3017 			ti_cipher.cipher_mode = sop.cmode;
3018 			break;
3019 		default:
3020 			DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode));
3021 			return -EINVAL;
3022 		}
3023 		DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode));
3024 		switch (sop.des3_mode){
3025 		case cryptocop_3des_eee:
3026 		case cryptocop_3des_eed:
3027 		case cryptocop_3des_ede:
3028 		case cryptocop_3des_edd:
3029 		case cryptocop_3des_dee:
3030 		case cryptocop_3des_ded:
3031 		case cryptocop_3des_dde:
3032 		case cryptocop_3des_ddd:
3033 			ti_cipher.tdes_mode = sop.des3_mode;
3034 			break;
3035 		default:
3036 			DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode));
3037 			return -EINVAL;
3038 		}
3039 		ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
3040 		ti_cipher.next = tis;
3041 		tis = &ti_cipher;
3042 	} /* if (sop.cipher != cryptocop_cipher_none) */
3043 	if (sop.digest != cryptocop_digest_none){
3044 		DEBUG(printk("setting digest transform\n"));
3045 		switch (sop.digest){
3046 		case cryptocop_digest_md5:
3047 			ti_digest.alg = cryptocop_alg_md5;
3048 			break;
3049 		case cryptocop_digest_sha1:
3050 			ti_digest.alg = cryptocop_alg_sha1;
3051 			break;
3052 		default:
3053 			DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest));
3054 			return -EINVAL;
3055 		}
3056 		ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
3057 		ti_digest.next = tis;
3058 		tis = &ti_digest;
3059 	} /* if (sop.digest != cryptocop_digest_none) */
3060 	if (sop.csum != cryptocop_csum_none){
3061 		DEBUG(printk("setting csum transform\n"));
3062 		switch (sop.csum){
3063 		case cryptocop_csum_le:
3064 		case cryptocop_csum_be:
3065 			ti_csum.csum_mode = sop.csum;
3066 			break;
3067 		default:
3068 			DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum));
3069 			return -EINVAL;
3070 		}
3071 		ti_csum.alg = cryptocop_alg_csum;
3072 		ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID;
3073 		ti_csum.next = tis;
3074 		tis = &ti_csum;
3075 	} /* (sop.csum != cryptocop_csum_none) */
3076 	dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL);
3077 	if (!dev){
3078 		DEBUG_API(printk("create session, alloc dev\n"));
3079 		return -ENOMEM;
3080 	}
3081 
3082 	err = cryptocop_new_session(&sid, tis, GFP_KERNEL);
3083 	DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);});
3084 
3085 	if (err) {
3086 		kfree(dev);
3087 		return err;
3088 	}
3089 	sess_op->ses_id = sid;
3090 	dev->sid = sid;
3091 	dev->next = filp->private_data;
3092 	filp->private_data = dev;
3093 
3094 	return 0;
3095 }
3096 
cryptocop_ioctl_unlocked(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)3097 static long cryptocop_ioctl_unlocked(struct inode *inode,
3098 	struct file *filp, unsigned int cmd, unsigned long arg)
3099 {
3100 	int err = 0;
3101 	if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) {
3102 		DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3103 		return -ENOTTY;
3104 	}
3105 	if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){
3106 		return -ENOTTY;
3107 	}
3108 	/* Access check of the argument.  Some commands, e.g. create session and process op,
3109 	   needs additional checks.  Those are handled in the command handling functions. */
3110 	if (_IOC_DIR(cmd) & _IOC_READ)
3111 		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
3112 	else if (_IOC_DIR(cmd) & _IOC_WRITE)
3113 		err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
3114 	if (err) return -EFAULT;
3115 
3116 	switch (cmd) {
3117 	case CRYPTOCOP_IO_CREATE_SESSION:
3118 		return cryptocop_ioctl_create_session(inode, filp, cmd, arg);
3119 	case CRYPTOCOP_IO_CLOSE_SESSION:
3120 		return cryptocop_ioctl_close_session(inode, filp, cmd, arg);
3121 	case CRYPTOCOP_IO_PROCESS_OP:
3122 		return cryptocop_ioctl_process(inode, filp, cmd, arg);
3123 	default:
3124 		DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3125 		return -ENOTTY;
3126 	}
3127 	return 0;
3128 }
3129 
3130 static long
cryptocop_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)3131 cryptocop_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3132 {
3133        long ret;
3134 
3135        mutex_lock(&cryptocop_mutex);
3136        ret = cryptocop_ioctl_unlocked(file_inode(filp), filp, cmd, arg);
3137        mutex_unlock(&cryptocop_mutex);
3138 
3139        return ret;
3140 }
3141 
3142 
3143 #ifdef LDEBUG
print_dma_descriptors(struct cryptocop_int_operation * iop)3144 static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3145 {
3146 	struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out;
3147 	struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in;
3148 	int                       i;
3149 
3150 	printk("print_dma_descriptors start\n");
3151 
3152 	printk("iop:\n");
3153 	printk("\tsid: 0x%llx\n", iop->sid);
3154 
3155 	printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3156 	printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
3157 	printk("\tddesc_out: 0x%p\n", iop->ddesc_out);
3158 	printk("\tddesc_in: 0x%p\n", iop->ddesc_in);
3159 
3160 	printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out));
3161 	printk("\tnext: 0x%p\n"
3162 	       "\tsaved_data: 0x%p\n"
3163 	       "\tsaved_data_buf: 0x%p\n",
3164 	       iop->ctx_out.next,
3165 	       iop->ctx_out.saved_data,
3166 	       iop->ctx_out.saved_data_buf);
3167 
3168 	printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in));
3169 	printk("\tnext: 0x%p\n"
3170 	       "\tsaved_data: 0x%p\n"
3171 	       "\tsaved_data_buf: 0x%p\n",
3172 	       iop->ctx_in.next,
3173 	       iop->ctx_in.saved_data,
3174 	       iop->ctx_in.saved_data_buf);
3175 
3176 	i = 0;
3177 	while (cdesc_out) {
3178 		dma_descr_data *td;
3179 		printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr);
3180 		printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr));
3181 		td = cdesc_out->dma_descr;
3182 		printk("\n\tbuf: 0x%p\n"
3183 		       "\tafter: 0x%p\n"
3184 		       "\tmd: 0x%04x\n"
3185 		       "\tnext: 0x%p\n",
3186 		       td->buf,
3187 		       td->after,
3188 		       td->md,
3189 		       td->next);
3190 		printk("flags:\n"
3191 		       "\twait:\t%d\n"
3192 		       "\teol:\t%d\n"
3193 		       "\touteop:\t%d\n"
3194 		       "\tineop:\t%d\n"
3195 		       "\tintr:\t%d\n",
3196 		       td->wait,
3197 		       td->eol,
3198 		       td->out_eop,
3199 		       td->in_eop,
3200 		       td->intr);
3201 		cdesc_out = cdesc_out->next;
3202 		i++;
3203 	}
3204 	i = 0;
3205 	while (cdesc_in) {
3206 		dma_descr_data *td;
3207 		printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr);
3208 		printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr));
3209 		td = cdesc_in->dma_descr;
3210 		printk("\n\tbuf: 0x%p\n"
3211 		       "\tafter: 0x%p\n"
3212 		       "\tmd: 0x%04x\n"
3213 		       "\tnext: 0x%p\n",
3214 		       td->buf,
3215 		       td->after,
3216 		       td->md,
3217 		       td->next);
3218 		printk("flags:\n"
3219 		       "\twait:\t%d\n"
3220 		       "\teol:\t%d\n"
3221 		       "\touteop:\t%d\n"
3222 		       "\tineop:\t%d\n"
3223 		       "\tintr:\t%d\n",
3224 		       td->wait,
3225 		       td->eol,
3226 		       td->out_eop,
3227 		       td->in_eop,
3228 		       td->intr);
3229 		cdesc_in = cdesc_in->next;
3230 		i++;
3231 	}
3232 
3233 	printk("print_dma_descriptors end\n");
3234 }
3235 
3236 
/* Debug dump of a user-supplied crypto operation (LDEBUG builds only).
 * Prints the in-data/cipher parameters followed by the digest and
 * checksum sub-operation ranges. */
static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
{
	printk("print_strcop_crypto_op, 0x%p\n", cop);

	/* In-data description and cipher parameters. */
	printk("indata=0x%p\n"
	       "inlen=%d\n"
	       "do_cipher=%d\n"
	       "decrypt=%d\n"
	       "cipher_explicit=%d\n"
	       "cipher_start=%d\n"
	       "cipher_len=%d\n"
	       "outdata=0x%p\n"
	       "outlen=%d\n",
	       cop->indata,
	       cop->inlen,
	       cop->do_cipher,
	       cop->decrypt,
	       cop->cipher_explicit,
	       cop->cipher_start,
	       cop->cipher_len,
	       cop->cipher_outdata,
	       cop->cipher_outlen);

	/* Digest sub-operation range within the in-data. */
	printk("do_digest=%d\n"
	       "digest_start=%d\n"
	       "digest_len=%d\n",
	       cop->do_digest,
	       cop->digest_start,
	       cop->digest_len);

	/* Checksum sub-operation range within the in-data. */
	printk("do_csum=%d\n"
	       "csum_start=%d\n"
	       "csum_len=%d\n",
	       cop->do_csum,
	       cop->csum_start,
	       cop->csum_len);
}
3275 
print_cryptocop_operation(struct cryptocop_operation * cop)3276 static void print_cryptocop_operation(struct cryptocop_operation *cop)
3277 {
3278 	struct cryptocop_desc      *d;
3279 	struct cryptocop_tfrm_cfg  *tc;
3280 	struct cryptocop_desc_cfg  *dc;
3281 	int                        i;
3282 
3283 	printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
3284 	printk("sid: %lld\n", cop->sid);
3285 	printk("operation_status=%d\n"
3286 	       "use_dmalists=%d\n"
3287 	       "in_interrupt=%d\n"
3288 	       "fast_callback=%d\n",
3289 	       cop->operation_status,
3290 	       cop->use_dmalists,
3291 	       cop->in_interrupt,
3292 	       cop->fast_callback);
3293 
3294 	if (cop->use_dmalists){
3295 		print_user_dma_lists(&cop->list_op);
3296 	} else {
3297 		printk("cop->tfrm_op\n"
3298 		       "tfrm_cfg=0x%p\n"
3299 		       "desc=0x%p\n"
3300 		       "indata=0x%p\n"
3301 		       "incount=%d\n"
3302 		       "inlen=%d\n"
3303 		       "outdata=0x%p\n"
3304 		       "outcount=%d\n"
3305 		       "outlen=%d\n\n",
3306 		       cop->tfrm_op.tfrm_cfg,
3307 		       cop->tfrm_op.desc,
3308 		       cop->tfrm_op.indata,
3309 		       cop->tfrm_op.incount,
3310 		       cop->tfrm_op.inlen,
3311 		       cop->tfrm_op.outdata,
3312 		       cop->tfrm_op.outcount,
3313 		       cop->tfrm_op.outlen);
3314 
3315 		tc = cop->tfrm_op.tfrm_cfg;
3316 		while (tc){
3317 			printk("tfrm_cfg, 0x%p\n"
3318 			       "tid=%d\n"
3319 			       "flags=%d\n"
3320 			       "inject_ix=%d\n"
3321 			       "next=0x%p\n",
3322 			       tc,
3323 			       tc->tid,
3324 			       tc->flags,
3325 			       tc->inject_ix,
3326 			       tc->next);
3327 			tc = tc->next;
3328 		}
3329 		d = cop->tfrm_op.desc;
3330 		while (d){
3331 			printk("\n======================desc, 0x%p\n"
3332 			       "length=%d\n"
3333 			       "cfg=0x%p\n"
3334 			       "next=0x%p\n",
3335 			       d,
3336 			       d->length,
3337 			       d->cfg,
3338 			       d->next);
3339 			dc = d->cfg;
3340 			while (dc){
3341 				printk("=========desc_cfg, 0x%p\n"
3342 				       "tid=%d\n"
3343 				       "src=%d\n"
3344 				       "last=%d\n"
3345 				       "next=0x%p\n",
3346 				       dc,
3347 				       dc->tid,
3348 				       dc->src,
3349 				       dc->last,
3350 				       dc->next);
3351 				dc = dc->next;
3352 			}
3353 			d = d->next;
3354 		}
3355 		printk("\n====iniov\n");
3356 		for (i = 0; i < cop->tfrm_op.incount; i++){
3357 			printk("indata[%d]\n"
3358 			       "base=0x%p\n"
3359 			       "len=%d\n",
3360 			       i,
3361 			       cop->tfrm_op.indata[i].iov_base,
3362 			       cop->tfrm_op.indata[i].iov_len);
3363 		}
3364 		printk("\n====outiov\n");
3365 		for (i = 0; i < cop->tfrm_op.outcount; i++){
3366 			printk("outdata[%d]\n"
3367 			       "base=0x%p\n"
3368 			       "len=%d\n",
3369 			       i,
3370 			       cop->tfrm_op.outdata[i].iov_base,
3371 			       cop->tfrm_op.outdata[i].iov_len);
3372 		}
3373 	}
3374 	printk("------------end print_cryptocop_operation\n");
3375 }
3376 
3377 
print_user_dma_lists(struct cryptocop_dma_list_operation * dma_op)3378 static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
3379 {
3380 	dma_descr_data *dd;
3381 	int i;
3382 
3383 	printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);
3384 
3385 	printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
3386 	printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));
3387 
3388 	printk("##############outlist\n");
3389 	dd = phys_to_virt((unsigned long int)dma_op->outlist);
3390 	i = 0;
3391 	while (dd != NULL) {
3392 		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3393 		printk("\n\tbuf: 0x%p\n"
3394 		       "\tafter: 0x%p\n"
3395 		       "\tmd: 0x%04x\n"
3396 		       "\tnext: 0x%p\n",
3397 		       dd->buf,
3398 		       dd->after,
3399 		       dd->md,
3400 		       dd->next);
3401 		printk("flags:\n"
3402 		       "\twait:\t%d\n"
3403 		       "\teol:\t%d\n"
3404 		       "\touteop:\t%d\n"
3405 		       "\tineop:\t%d\n"
3406 		       "\tintr:\t%d\n",
3407 		       dd->wait,
3408 		       dd->eol,
3409 		       dd->out_eop,
3410 		       dd->in_eop,
3411 		       dd->intr);
3412 		if (dd->eol)
3413 			dd = NULL;
3414 		else
3415 			dd = phys_to_virt((unsigned long int)dd->next);
3416 		++i;
3417 	}
3418 
3419 	printk("##############inlist\n");
3420 	dd = phys_to_virt((unsigned long int)dma_op->inlist);
3421 	i = 0;
3422 	while (dd != NULL) {
3423 		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3424 		printk("\n\tbuf: 0x%p\n"
3425 		       "\tafter: 0x%p\n"
3426 		       "\tmd: 0x%04x\n"
3427 		       "\tnext: 0x%p\n",
3428 		       dd->buf,
3429 		       dd->after,
3430 		       dd->md,
3431 		       dd->next);
3432 		printk("flags:\n"
3433 		       "\twait:\t%d\n"
3434 		       "\teol:\t%d\n"
3435 		       "\touteop:\t%d\n"
3436 		       "\tineop:\t%d\n"
3437 		       "\tintr:\t%d\n",
3438 		       dd->wait,
3439 		       dd->eol,
3440 		       dd->out_eop,
3441 		       dd->in_eop,
3442 		       dd->intr);
3443 		if (dd->eol)
3444 			dd = NULL;
3445 		else
3446 			dd = phys_to_virt((unsigned long int)dd->next);
3447 		++i;
3448 	}
3449 }
3450 
3451 
print_lock_status(void)3452 static void print_lock_status(void)
3453 {
3454 	printk("**********************print_lock_status\n");
3455 	printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock));
3456 	printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock));
3457 	printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock));
3458 	printk("cryptocop_sessions_lock %d\n", spin_is_locked(cryptocop_sessions_lock));
3459 	printk("running_job_lock %d\n", spin_is_locked(running_job_lock));
3460 	printk("cryptocop_process_lock %d\n", spin_is_locked(cryptocop_process_lock));
3461 }
3462 #endif /* LDEBUG */
3463 
3464 
3465 static const char cryptocop_name[] = "ETRAX FS stream co-processor";
3466 
init_stream_coprocessor(void)3467 static int init_stream_coprocessor(void)
3468 {
3469 	int err;
3470 	int i;
3471 	static int initialized = 0;
3472 
3473 	if (initialized)
3474 		return 0;
3475 
3476 	initialized = 1;
3477 
3478 	printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");
3479 
3480 	err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
3481 	if (err < 0) {
3482 		printk(KERN_ERR "stream co-processor: could not get major number.\n");
3483 		return err;
3484 	}
3485 
3486 	err = init_cryptocop();
3487 	if (err) {
3488 		(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3489 		return err;
3490 	}
3491 	err = cryptocop_job_queue_init();
3492 	if (err) {
3493 		release_cryptocop();
3494 		(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3495 		return err;
3496 	}
3497 	/* Init the descriptor pool. */
3498 	for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
3499 		descr_pool[i].from_pool = 1;
3500 		descr_pool[i].next = &descr_pool[i + 1];
3501 	}
3502 	descr_pool[i].from_pool = 1;
3503 	descr_pool[i].next = NULL;
3504 	descr_pool_free_list = &descr_pool[0];
3505 	descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;
3506 
3507 	spin_lock_init(&cryptocop_completed_jobs_lock);
3508 	spin_lock_init(&cryptocop_job_queue_lock);
3509 	spin_lock_init(&descr_pool_lock);
3510 	spin_lock_init(&cryptocop_sessions_lock);
3511 	spin_lock_init(&running_job_lock);
3512 	spin_lock_init(&cryptocop_process_lock);
3513 
3514 	cryptocop_sessions = NULL;
3515 	next_sid = 1;
3516 
3517 	cryptocop_running_job = NULL;
3518 
3519 	printk("stream co-processor: init done.\n");
3520 	return 0;
3521 }
3522 
/* Module teardown: release the co-processor hardware and close the
 * job queue.
 * NOTE(review): the CRYPTOCOP_MAJOR char device registered in
 * init_stream_coprocessor() is not unregistered here -- looks like a
 * leak on module unload; confirm whether it is released elsewhere. */
static void __exit exit_stream_coprocessor(void)
{
	release_cryptocop();
	cryptocop_job_queue_close();
}

module_init(init_stream_coprocessor);
module_exit(exit_stream_coprocessor);
3531 
3532