// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
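	/* fifo_len is a power of two, so the mask wraps the ring index */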
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}

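/*
 * Usage sketch (illustrative only; my_done_cb and req are hypothetical
 * names): a caller fills in a dma-mapped descriptor, then does
 *
 *	err = talitos_submit(dev, ch, &edesc->desc, my_done_cb, req);
 *	if (err != -EINPROGRESS) {
 *		// -EAGAIN means the channel fifo was full; unmap and free
 *	}
 *
 * and the request completes asynchronously via my_done_cb().
 */
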
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

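/*
 * For illustration: DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) below
 * expands to the tasklet handler talitos1_done_4ch(); because
 * ch_done_mask is a compile-time constant, the per-channel tests fold
 * away in handlers that serve only a subset of the channels.
 */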
DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static __be32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
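			/* i.e. 29 + (ch & 1) * 2 - (ch & 2) * 6:
			 * ch=0 -> 29, ch=1 -> 31, ch=2 -> 17, ch=3 -> 19
			 */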
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY 3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#else
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
#endif
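/* e.g. with CONFIG_CRYPTO_DEV_TALITOS2: 32 (AES_MAX_KEY_SIZE) +
 * 128 (SHA512_BLOCK_SIZE) = 160 bytes; 32 + 64 = 96 bytes otherwise
 */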
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset,
			       sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}

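/*
 * Illustration of sg_to_link_tbl_offset() output (not authoritative):
 * a 3-segment s/g list with datalen ending inside the second segment
 * yields one {ptr, len} entry per segment plus an extra entry where the
 * segment is split at the datalen boundary; the final entry is tagged
 * DESC_PTR_LNKTBL_RET so the SEC returns to the main descriptor.
 */
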
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force, int align)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int aligned_len = ALIGN(len, align);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len,
			       is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len,
			       is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off], align);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr),
		       aligned_len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false, 1);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false, 1);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt, 1);
	tbl_off += ret;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
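	/*
	 * Rough layout of the allocation (sketch, inferred from the offsets
	 * used below): edesc | link tables or SEC1 bounce data (dma_len,
	 * ICV included) | stashed ICV (if icv_stashing) | second SEC1
	 * descriptor (ahash only) | IV copy (ivsize)
	 */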
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}

static int skcipher_setkey(struct crypto_skcipher *cipher,
			   const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int skcipher_des_setkey(struct crypto_skcipher *cipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(cipher, key) ?:
	       skcipher_setkey(cipher, key, keylen);
}

static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(cipher, key) ?:
	       skcipher_setkey(cipher, key, keylen);
}

static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
			       const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return skcipher_setkey(cipher, key, keylen);

	return -EINVAL;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct skcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void skcipher_done(struct device *dev,
			  struct talitos_desc *desc, void *context,
			  int err)
{
	struct skcipher_request *areq = context;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->iv, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct skcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
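	/* for AESU CTR requests, the mapped data length is padded below to
	 * a full AES block via the align = 16 argument to talitos_sg_map_ext()
	 */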
1583
1584 /* first DWORD empty */
1585
1586 /* cipher iv */
1587 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1588
1589 /* cipher key */
1590 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1591
1592 sg_count = edesc->src_nents ?: 1;
1593 if (is_sec1 && sg_count > 1)
1594 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1595 cryptlen);
1596 else
1597 sg_count = dma_map_sg(dev, areq->src, sg_count,
1598 (areq->src == areq->dst) ?
1599 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1600 /*
1601 * cipher in
1602 */
1603 sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
1604 sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
1605 if (sg_count > 1)
1606 sync_needed = true;
1607
1608 /* cipher out */
1609 if (areq->src != areq->dst) {
1610 sg_count = edesc->dst_nents ? : 1;
1611 if (!is_sec1 || sg_count == 1)
1612 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1613 }
1614
1615 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1616 sg_count, 0, (edesc->src_nents + 1));
1617 if (ret > 1)
1618 sync_needed = true;
1619
1620 /* iv out */
1621 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1622 DMA_FROM_DEVICE);
1623
1624 /* last DWORD empty */
1625
1626 if (sync_needed)
1627 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1628 edesc->dma_len, DMA_BIDIRECTIONAL);
1629
1630 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1631 if (ret != -EINPROGRESS) {
1632 common_nonsnoop_unmap(dev, edesc, areq);
1633 kfree(edesc);
1634 }
1635 return ret;
1636 }
1637
skcipher_edesc_alloc(struct skcipher_request * areq,bool encrypt)1638 static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
1639 areq, bool encrypt)
1640 {
1641 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1642 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1643 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1644
1645 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1646 areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1647 areq->base.flags, encrypt);
1648 }
1649
skcipher_encrypt(struct skcipher_request * areq)1650 static int skcipher_encrypt(struct skcipher_request *areq)
1651 {
1652 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1653 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1654 struct talitos_edesc *edesc;
1655 unsigned int blocksize =
1656 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1657
1658 if (!areq->cryptlen)
1659 return 0;
1660
1661 if (areq->cryptlen % blocksize)
1662 return -EINVAL;
1663
1664 /* allocate extended descriptor */
1665 edesc = skcipher_edesc_alloc(areq, true);
1666 if (IS_ERR(edesc))
1667 return PTR_ERR(edesc);
1668
1669 /* set encrypt */
1670 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1671
1672 return common_nonsnoop(edesc, areq, skcipher_done);
1673 }
1674
skcipher_decrypt(struct skcipher_request * areq)1675 static int skcipher_decrypt(struct skcipher_request *areq)
1676 {
1677 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1678 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1679 struct talitos_edesc *edesc;
1680 unsigned int blocksize =
1681 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1682
1683 if (!areq->cryptlen)
1684 return 0;
1685
1686 if (areq->cryptlen % blocksize)
1687 return -EINVAL;
1688
1689 /* allocate extended descriptor */
1690 edesc = skcipher_edesc_alloc(areq, false);
1691 if (IS_ERR(edesc))
1692 return PTR_ERR(edesc);
1693
1694 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1695
1696 return common_nonsnoop(edesc, areq, skcipher_done);
1697 }
1698
1699 static void common_nonsnoop_hash_unmap(struct device *dev,
1700 struct talitos_edesc *edesc,
1701 struct ahash_request *areq)
1702 {
1703 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1704 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1705 struct talitos_private *priv = dev_get_drvdata(dev);
1706 bool is_sec1 = has_ftr_sec1(priv);
1707 struct talitos_desc *desc = &edesc->desc;
1708 struct talitos_desc *desc2 = (struct talitos_desc *)
1709 (edesc->buf + edesc->dma_len);
1710
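/* desc2, used for SEC1 chained requests, lives in the same allocation
 * just past the link table and is only valid when next_desc is set */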
1711 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1712 if (desc->next_desc &&
1713 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1714 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1715 if (req_ctx->last)
1716 memcpy(areq->result, req_ctx->hw_context,
1717 crypto_ahash_digestsize(tfm));
1718
1719 if (req_ctx->psrc)
1720 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1721
1722 /* When using hashctx-in, must unmap it. */
1723 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1724 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1725 DMA_TO_DEVICE);
1726 else if (desc->next_desc)
1727 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1728 DMA_TO_DEVICE);
1729
1730 if (is_sec1 && req_ctx->nbuf)
1731 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1732 DMA_TO_DEVICE);
1733
1734 if (edesc->dma_len)
1735 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1736 DMA_BIDIRECTIONAL);
1737
1738 if (edesc->desc.next_desc)
1739 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1740 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1741 }
1742
1743 static void ahash_done(struct device *dev,
1744 struct talitos_desc *desc, void *context,
1745 int err)
1746 {
1747 struct ahash_request *areq = context;
1748 struct talitos_edesc *edesc =
1749 container_of(desc, struct talitos_edesc, desc);
1750 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1751
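/* req_ctx->buf[] is a ping-pong pair: ahash_process_req() has already
 * stashed the leftover bytes in the other buffer, so flip to it */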
1752 if (!req_ctx->last && req_ctx->to_hash_later) {
1753 /* Position any partial block for next update/final/finup */
1754 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1755 req_ctx->nbuf = req_ctx->to_hash_later;
1756 }
1757 common_nonsnoop_hash_unmap(dev, edesc, areq);
1758
1759 kfree(edesc);
1760
1761 areq->base.complete(&areq->base, err);
1762 }
1763
1764 /*
1765 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1766 * ourselves and submit a padded block
1767 */
1768 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1769 struct talitos_edesc *edesc,
1770 struct talitos_ptr *ptr)
1771 {
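/*
 * Canonical MD5/SHA-1/SHA-256 padding of an empty message: a 0x80
 * byte followed by zeros, including the 64-bit zero bit-count,
 * filling exactly one 64-byte block.
 */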
1772 static u8 padded_hash[64] = {
1773 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1774 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1775 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1776 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1777 };
1778
1779 pr_err_once("Bug in SEC1, padding ourself\n");
1780 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1781 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1782 (char *)padded_hash, DMA_TO_DEVICE);
1783 }
1784
1785 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1786 struct ahash_request *areq, unsigned int length,
1787 void (*callback) (struct device *dev,
1788 struct talitos_desc *desc,
1789 void *context, int error))
1790 {
1791 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1792 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1793 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1794 struct device *dev = ctx->dev;
1795 struct talitos_desc *desc = &edesc->desc;
1796 int ret;
1797 bool sync_needed = false;
1798 struct talitos_private *priv = dev_get_drvdata(dev);
1799 bool is_sec1 = has_ftr_sec1(priv);
1800 int sg_count;
1801
1802 /* first DWORD empty */
1803
1804 /* hash context in */
1805 if (!req_ctx->first || req_ctx->swinit) {
1806 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1807 req_ctx->hw_context_size,
1808 req_ctx->hw_context,
1809 DMA_TO_DEVICE);
1810 req_ctx->swinit = 0;
1811 }
1812 /* Indicate next op is not the first. */
1813 req_ctx->first = 0;
1814
1815 /* HMAC key */
1816 if (ctx->keylen)
1817 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1818 is_sec1);
1819
1820 if (is_sec1 && req_ctx->nbuf)
1821 length -= req_ctx->nbuf;
1822
1823 sg_count = edesc->src_nents ?: 1;
1824 if (is_sec1 && sg_count > 1)
1825 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1826 else if (length)
1827 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1828 DMA_TO_DEVICE);
1829 /*
1830 * data in
1831 */
1832 if (is_sec1 && req_ctx->nbuf) {
1833 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1834 req_ctx->buf[req_ctx->buf_idx],
1835 DMA_TO_DEVICE);
1836 } else {
1837 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1838 &desc->ptr[3], sg_count, 0, 0);
1839 if (sg_count > 1)
1840 sync_needed = true;
1841 }
1842
1843 /* fifth DWORD empty */
1844
1845 /* hash/HMAC out -or- hash context out */
1846 if (req_ctx->last)
1847 map_single_talitos_ptr(dev, &desc->ptr[5],
1848 crypto_ahash_digestsize(tfm),
1849 req_ctx->hw_context, DMA_FROM_DEVICE);
1850 else
1851 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1852 req_ctx->hw_context_size,
1853 req_ctx->hw_context,
1854 DMA_FROM_DEVICE);
1855
1856 /* last DWORD empty */
1857
1858 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1859 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1860
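/*
 * SEC1 cannot hash previously buffered bytes and new data in a single
 * mapping, so when both are present a second descriptor is chained in:
 * this one hashes the buffered partial block (continue, no padding, no
 * done notification), and desc2 finishes with the remaining data and
 * the original padding/notification settings.
 */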
1861 if (is_sec1 && req_ctx->nbuf && length) {
1862 struct talitos_desc *desc2 = (struct talitos_desc *)
1863 (edesc->buf + edesc->dma_len);
1864 dma_addr_t next_desc;
1865
1866 memset(desc2, 0, sizeof(*desc2));
1867 desc2->hdr = desc->hdr;
1868 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1869 desc2->hdr1 = desc2->hdr;
1870 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1871 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1872 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1873
1874 if (desc->ptr[1].ptr)
1875 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1876 is_sec1);
1877 else
1878 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1879 req_ctx->hw_context_size,
1880 req_ctx->hw_context,
1881 DMA_TO_DEVICE);
1882 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1883 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1884 &desc2->ptr[3], sg_count, 0, 0);
1885 if (sg_count > 1)
1886 sync_needed = true;
1887 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1888 if (req_ctx->last)
1889 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1890 req_ctx->hw_context_size,
1891 req_ctx->hw_context,
1892 DMA_FROM_DEVICE);
1893
1894 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1895 DMA_BIDIRECTIONAL);
1896 desc->next_desc = cpu_to_be32(next_desc);
1897 }
1898
1899 if (sync_needed)
1900 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1901 edesc->dma_len, DMA_BIDIRECTIONAL);
1902
1903 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1904 if (ret != -EINPROGRESS) {
1905 common_nonsnoop_hash_unmap(dev, edesc, areq);
1906 kfree(edesc);
1907 }
1908 return ret;
1909 }
1910
1911 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1912 unsigned int nbytes)
1913 {
1914 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1915 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1916 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1917 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1918 bool is_sec1 = has_ftr_sec1(priv);
1919
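/* on SEC1 the buffered bytes are fed in separately through desc->ptr[3],
 * so they take no room in this descriptor's source mapping */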
1920 if (is_sec1)
1921 nbytes -= req_ctx->nbuf;
1922
1923 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1924 nbytes, 0, 0, 0, areq->base.flags, false);
1925 }
1926
1927 static int ahash_init(struct ahash_request *areq)
1928 {
1929 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1930 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1931 struct device *dev = ctx->dev;
1932 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1933 unsigned int size;
1934 dma_addr_t dma;
1935
1936 /* Initialize the context */
1937 req_ctx->buf_idx = 0;
1938 req_ctx->nbuf = 0;
1939 req_ctx->first = 1; /* first indicates h/w must init its context */
1940 req_ctx->swinit = 0; /* assume h/w init of context */
1941 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1942 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1943 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1944 req_ctx->hw_context_size = size;
1945
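/*
 * This map/unmap pair is for its cache-maintenance side effect only:
 * it cleans any dirty cachelines over hw_context so that the later
 * _nosync (skip-CPU-sync) mappings of the same buffer stay coherent
 * with the device.
 */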
1946 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1947 DMA_TO_DEVICE);
1948 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1949
1950 return 0;
1951 }
1952
1953 /*
1954 * on h/w without explicit sha224 support, we initialize h/w context
1955 * manually with sha224 constants, and tell it to run sha256.
1956 */
1957 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1958 {
1959 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1960
1961 req_ctx->hw_context[0] = SHA224_H0;
1962 req_ctx->hw_context[1] = SHA224_H1;
1963 req_ctx->hw_context[2] = SHA224_H2;
1964 req_ctx->hw_context[3] = SHA224_H3;
1965 req_ctx->hw_context[4] = SHA224_H4;
1966 req_ctx->hw_context[5] = SHA224_H5;
1967 req_ctx->hw_context[6] = SHA224_H6;
1968 req_ctx->hw_context[7] = SHA224_H7;
1969
1970 /* init 64-bit count */
1971 req_ctx->hw_context[8] = 0;
1972 req_ctx->hw_context[9] = 0;
1973
1974 ahash_init(areq);
1975 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1976
1977 return 0;
1978 }
1979
1980 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1981 {
1982 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1983 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1984 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1985 struct talitos_edesc *edesc;
1986 unsigned int blocksize =
1987 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1988 unsigned int nbytes_to_hash;
1989 unsigned int to_hash_later;
1990 unsigned int nsg;
1991 int nents;
1992 struct device *dev = ctx->dev;
1993 struct talitos_private *priv = dev_get_drvdata(dev);
1994 bool is_sec1 = has_ftr_sec1(priv);
1995 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1996
1997 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1998 /* Buffer up to one whole block */
1999 nents = sg_nents_for_len(areq->src, nbytes);
2000 if (nents < 0) {
2001 dev_err(ctx->dev, "Invalid number of src SG.\n");
2002 return nents;
2003 }
2004 sg_copy_to_buffer(areq->src, nents,
2005 ctx_buf + req_ctx->nbuf, nbytes);
2006 req_ctx->nbuf += nbytes;
2007 return 0;
2008 }
2009
2010 /* At least (blocksize + 1) bytes are available to hash */
2011 nbytes_to_hash = nbytes + req_ctx->nbuf;
2012 to_hash_later = nbytes_to_hash & (blocksize - 1);
2013
2014 if (req_ctx->last)
2015 to_hash_later = 0;
2016 else if (to_hash_later)
2017 /* There is a partial block. Hash the full block(s) now */
2018 nbytes_to_hash -= to_hash_later;
2019 else {
2020 /* Keep one block buffered */
2021 nbytes_to_hash -= blocksize;
2022 to_hash_later = blocksize;
2023 }
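/*
 * Worked example: blocksize 64, nbuf 10, nbytes 200 gives
 * nbytes_to_hash = 210 and to_hash_later = 18, so 192 bytes (three
 * full blocks) are hashed now; were 210 an exact multiple of 64, one
 * whole block would be held back instead.
 */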
2024
2025 /* Chain in any previously buffered data */
2026 if (!is_sec1 && req_ctx->nbuf) {
2027 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2028 sg_init_table(req_ctx->bufsl, nsg);
2029 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2030 if (nsg > 1)
2031 sg_chain(req_ctx->bufsl, 2, areq->src);
2032 req_ctx->psrc = req_ctx->bufsl;
2033 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2034 int offset;
2035
2036 if (nbytes_to_hash > blocksize)
2037 offset = blocksize - req_ctx->nbuf;
2038 else
2039 offset = nbytes_to_hash - req_ctx->nbuf;
2040 nents = sg_nents_for_len(areq->src, offset);
2041 if (nents < 0) {
2042 dev_err(ctx->dev, "Invalid number of src SG.\n");
2043 return nents;
2044 }
2045 sg_copy_to_buffer(areq->src, nents,
2046 ctx_buf + req_ctx->nbuf, offset);
2047 req_ctx->nbuf += offset;
2048 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2049 offset);
2050 } else
2051 req_ctx->psrc = areq->src;
2052
2053 if (to_hash_later) {
2054 nents = sg_nents_for_len(areq->src, nbytes);
2055 if (nents < 0) {
2056 dev_err(ctx->dev, "Invalid number of src SG.\n");
2057 return nents;
2058 }
2059 sg_pcopy_to_buffer(areq->src, nents,
2060 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2061 to_hash_later,
2062 nbytes - to_hash_later);
2063 }
2064 req_ctx->to_hash_later = to_hash_later;
2065
2066 /* Allocate extended descriptor */
2067 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2068 if (IS_ERR(edesc))
2069 return PTR_ERR(edesc);
2070
2071 edesc->desc.hdr = ctx->desc_hdr_template;
2072
2073 /* On last one, request SEC to pad; otherwise continue */
2074 if (req_ctx->last)
2075 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2076 else
2077 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2078
2079 /* request SEC to INIT hash. */
2080 if (req_ctx->first && !req_ctx->swinit)
2081 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2082
2083 /* When the tfm context has a keylen, it's an HMAC.
2084 * A first or last (i.e. not a middle) descriptor must request HMAC.
2085 */
2086 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2087 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2088
2089 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2090 }
2091
2092 static int ahash_update(struct ahash_request *areq)
2093 {
2094 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2095
2096 req_ctx->last = 0;
2097
2098 return ahash_process_req(areq, areq->nbytes);
2099 }
2100
2101 static int ahash_final(struct ahash_request *areq)
2102 {
2103 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2104
2105 req_ctx->last = 1;
2106
2107 return ahash_process_req(areq, 0);
2108 }
2109
2110 static int ahash_finup(struct ahash_request *areq)
2111 {
2112 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2113
2114 req_ctx->last = 1;
2115
2116 return ahash_process_req(areq, areq->nbytes);
2117 }
2118
2119 static int ahash_digest(struct ahash_request *areq)
2120 {
2121 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2122 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2123
2124 ahash->init(areq);
2125 req_ctx->last = 1;
2126
2127 return ahash_process_req(areq, areq->nbytes);
2128 }
2129
2130 static int ahash_export(struct ahash_request *areq, void *out)
2131 {
2132 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2133 struct talitos_export_state *export = out;
2134 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2135 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2136 struct device *dev = ctx->dev;
2137 dma_addr_t dma;
2138
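/*
 * As in ahash_init(), this map/unmap pair exists only for its cache
 * maintenance: it invalidates stale cachelines so the CPU reads the
 * hw_context most recently written by the device.
 */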
2139 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2140 DMA_FROM_DEVICE);
2141 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2142
2143 memcpy(export->hw_context, req_ctx->hw_context,
2144 req_ctx->hw_context_size);
2145 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2146 export->swinit = req_ctx->swinit;
2147 export->first = req_ctx->first;
2148 export->last = req_ctx->last;
2149 export->to_hash_later = req_ctx->to_hash_later;
2150 export->nbuf = req_ctx->nbuf;
2151
2152 return 0;
2153 }
2154
2155 static int ahash_import(struct ahash_request *areq, const void *in)
2156 {
2157 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2158 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2159 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2160 struct device *dev = ctx->dev;
2161 const struct talitos_export_state *export = in;
2162 unsigned int size;
2163 dma_addr_t dma;
2164
2165 memset(req_ctx, 0, sizeof(*req_ctx));
2166 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2167 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2168 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2169 req_ctx->hw_context_size = size;
2170 memcpy(req_ctx->hw_context, export->hw_context, size);
2171 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2172 req_ctx->swinit = export->swinit;
2173 req_ctx->first = export->first;
2174 req_ctx->last = export->last;
2175 req_ctx->to_hash_later = export->to_hash_later;
2176 req_ctx->nbuf = export->nbuf;
2177
2178 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2179 DMA_TO_DEVICE);
2180 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2181
2182 return 0;
2183 }
2184
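/*
 * Digest an over-long HMAC key down to digestsize bytes with this same
 * tfm, run synchronously via crypto_wait_req(); ctx->keylen stays 0 for
 * the duration so the request executes as a plain hash (per HMAC,
 * RFC 2104, keys longer than the block size are hashed first).
 */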
2185 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2186 u8 *hash)
2187 {
2188 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2189
2190 struct scatterlist sg[1];
2191 struct ahash_request *req;
2192 struct crypto_wait wait;
2193 int ret;
2194
2195 crypto_init_wait(&wait);
2196
2197 req = ahash_request_alloc(tfm, GFP_KERNEL);
2198 if (!req)
2199 return -ENOMEM;
2200
2201 /* Keep tfm keylen == 0 during hash of the long key */
2202 ctx->keylen = 0;
2203 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2204 crypto_req_done, &wait);
2205
2206 sg_init_one(&sg[0], key, keylen);
2207
2208 ahash_request_set_crypt(req, sg, hash, keylen);
2209 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2210
2211 ahash_request_free(req);
2212
2213 return ret;
2214 }
2215
2216 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2217 unsigned int keylen)
2218 {
2219 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2220 struct device *dev = ctx->dev;
2221 unsigned int blocksize =
2222 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2223 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2224 unsigned int keysize = keylen;
2225 u8 hash[SHA512_DIGEST_SIZE];
2226 int ret;
2227
2228 if (keylen <= blocksize)
2229 memcpy(ctx->key, key, keysize);
2230 else {
2231 /* Must get the hash of the long key */
2232 ret = keyhash(tfm, key, keylen, hash);
2233
2234 if (ret)
2235 return -EINVAL;
2236
2237 keysize = digestsize;
2238 memcpy(ctx->key, hash, digestsize);
2239 }
2240
2241 if (ctx->keylen)
2242 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2243
2244 ctx->keylen = keysize;
2245 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2246
2247 return 0;
2248 }
2249
2250
2251 struct talitos_alg_template {
2252 u32 type;
2253 u32 priority;
2254 union {
2255 struct skcipher_alg skcipher;
2256 struct ahash_alg hash;
2257 struct aead_alg aead;
2258 } alg;
2259 __be32 desc_hdr_template;
2260 };
2261
2262 static struct talitos_alg_template driver_algs[] = {
2263 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2264 { .type = CRYPTO_ALG_TYPE_AEAD,
2265 .alg.aead = {
2266 .base = {
2267 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2268 .cra_driver_name = "authenc-hmac-sha1-"
2269 "cbc-aes-talitos",
2270 .cra_blocksize = AES_BLOCK_SIZE,
2271 .cra_flags = CRYPTO_ALG_ASYNC |
2272 CRYPTO_ALG_ALLOCATES_MEMORY,
2273 },
2274 .ivsize = AES_BLOCK_SIZE,
2275 .maxauthsize = SHA1_DIGEST_SIZE,
2276 },
2277 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2278 DESC_HDR_SEL0_AESU |
2279 DESC_HDR_MODE0_AESU_CBC |
2280 DESC_HDR_SEL1_MDEUA |
2281 DESC_HDR_MODE1_MDEU_INIT |
2282 DESC_HDR_MODE1_MDEU_PAD |
2283 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2284 },
2285 { .type = CRYPTO_ALG_TYPE_AEAD,
2286 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2287 .alg.aead = {
2288 .base = {
2289 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2290 .cra_driver_name = "authenc-hmac-sha1-"
2291 "cbc-aes-talitos-hsna",
2292 .cra_blocksize = AES_BLOCK_SIZE,
2293 .cra_flags = CRYPTO_ALG_ASYNC |
2294 CRYPTO_ALG_ALLOCATES_MEMORY,
2295 },
2296 .ivsize = AES_BLOCK_SIZE,
2297 .maxauthsize = SHA1_DIGEST_SIZE,
2298 },
2299 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2300 DESC_HDR_SEL0_AESU |
2301 DESC_HDR_MODE0_AESU_CBC |
2302 DESC_HDR_SEL1_MDEUA |
2303 DESC_HDR_MODE1_MDEU_INIT |
2304 DESC_HDR_MODE1_MDEU_PAD |
2305 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2306 },
2307 { .type = CRYPTO_ALG_TYPE_AEAD,
2308 .alg.aead = {
2309 .base = {
2310 .cra_name = "authenc(hmac(sha1),"
2311 "cbc(des3_ede))",
2312 .cra_driver_name = "authenc-hmac-sha1-"
2313 "cbc-3des-talitos",
2314 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2315 .cra_flags = CRYPTO_ALG_ASYNC |
2316 CRYPTO_ALG_ALLOCATES_MEMORY,
2317 },
2318 .ivsize = DES3_EDE_BLOCK_SIZE,
2319 .maxauthsize = SHA1_DIGEST_SIZE,
2320 .setkey = aead_des3_setkey,
2321 },
2322 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2323 DESC_HDR_SEL0_DEU |
2324 DESC_HDR_MODE0_DEU_CBC |
2325 DESC_HDR_MODE0_DEU_3DES |
2326 DESC_HDR_SEL1_MDEUA |
2327 DESC_HDR_MODE1_MDEU_INIT |
2328 DESC_HDR_MODE1_MDEU_PAD |
2329 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2330 },
2331 { .type = CRYPTO_ALG_TYPE_AEAD,
2332 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2333 .alg.aead = {
2334 .base = {
2335 .cra_name = "authenc(hmac(sha1),"
2336 "cbc(des3_ede))",
2337 .cra_driver_name = "authenc-hmac-sha1-"
2338 "cbc-3des-talitos-hsna",
2339 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2340 .cra_flags = CRYPTO_ALG_ASYNC |
2341 CRYPTO_ALG_ALLOCATES_MEMORY,
2342 },
2343 .ivsize = DES3_EDE_BLOCK_SIZE,
2344 .maxauthsize = SHA1_DIGEST_SIZE,
2345 .setkey = aead_des3_setkey,
2346 },
2347 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2348 DESC_HDR_SEL0_DEU |
2349 DESC_HDR_MODE0_DEU_CBC |
2350 DESC_HDR_MODE0_DEU_3DES |
2351 DESC_HDR_SEL1_MDEUA |
2352 DESC_HDR_MODE1_MDEU_INIT |
2353 DESC_HDR_MODE1_MDEU_PAD |
2354 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2355 },
2356 { .type = CRYPTO_ALG_TYPE_AEAD,
2357 .alg.aead = {
2358 .base = {
2359 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2360 .cra_driver_name = "authenc-hmac-sha224-"
2361 "cbc-aes-talitos",
2362 .cra_blocksize = AES_BLOCK_SIZE,
2363 .cra_flags = CRYPTO_ALG_ASYNC |
2364 CRYPTO_ALG_ALLOCATES_MEMORY,
2365 },
2366 .ivsize = AES_BLOCK_SIZE,
2367 .maxauthsize = SHA224_DIGEST_SIZE,
2368 },
2369 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2370 DESC_HDR_SEL0_AESU |
2371 DESC_HDR_MODE0_AESU_CBC |
2372 DESC_HDR_SEL1_MDEUA |
2373 DESC_HDR_MODE1_MDEU_INIT |
2374 DESC_HDR_MODE1_MDEU_PAD |
2375 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2376 },
2377 { .type = CRYPTO_ALG_TYPE_AEAD,
2378 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2379 .alg.aead = {
2380 .base = {
2381 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2382 .cra_driver_name = "authenc-hmac-sha224-"
2383 "cbc-aes-talitos-hsna",
2384 .cra_blocksize = AES_BLOCK_SIZE,
2385 .cra_flags = CRYPTO_ALG_ASYNC |
2386 CRYPTO_ALG_ALLOCATES_MEMORY,
2387 },
2388 .ivsize = AES_BLOCK_SIZE,
2389 .maxauthsize = SHA224_DIGEST_SIZE,
2390 },
2391 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2392 DESC_HDR_SEL0_AESU |
2393 DESC_HDR_MODE0_AESU_CBC |
2394 DESC_HDR_SEL1_MDEUA |
2395 DESC_HDR_MODE1_MDEU_INIT |
2396 DESC_HDR_MODE1_MDEU_PAD |
2397 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2398 },
2399 { .type = CRYPTO_ALG_TYPE_AEAD,
2400 .alg.aead = {
2401 .base = {
2402 .cra_name = "authenc(hmac(sha224),"
2403 "cbc(des3_ede))",
2404 .cra_driver_name = "authenc-hmac-sha224-"
2405 "cbc-3des-talitos",
2406 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2407 .cra_flags = CRYPTO_ALG_ASYNC |
2408 CRYPTO_ALG_ALLOCATES_MEMORY,
2409 },
2410 .ivsize = DES3_EDE_BLOCK_SIZE,
2411 .maxauthsize = SHA224_DIGEST_SIZE,
2412 .setkey = aead_des3_setkey,
2413 },
2414 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2415 DESC_HDR_SEL0_DEU |
2416 DESC_HDR_MODE0_DEU_CBC |
2417 DESC_HDR_MODE0_DEU_3DES |
2418 DESC_HDR_SEL1_MDEUA |
2419 DESC_HDR_MODE1_MDEU_INIT |
2420 DESC_HDR_MODE1_MDEU_PAD |
2421 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2422 },
2423 { .type = CRYPTO_ALG_TYPE_AEAD,
2424 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2425 .alg.aead = {
2426 .base = {
2427 .cra_name = "authenc(hmac(sha224),"
2428 "cbc(des3_ede))",
2429 .cra_driver_name = "authenc-hmac-sha224-"
2430 "cbc-3des-talitos-hsna",
2431 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2432 .cra_flags = CRYPTO_ALG_ASYNC |
2433 CRYPTO_ALG_ALLOCATES_MEMORY,
2434 },
2435 .ivsize = DES3_EDE_BLOCK_SIZE,
2436 .maxauthsize = SHA224_DIGEST_SIZE,
2437 .setkey = aead_des3_setkey,
2438 },
2439 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2440 DESC_HDR_SEL0_DEU |
2441 DESC_HDR_MODE0_DEU_CBC |
2442 DESC_HDR_MODE0_DEU_3DES |
2443 DESC_HDR_SEL1_MDEUA |
2444 DESC_HDR_MODE1_MDEU_INIT |
2445 DESC_HDR_MODE1_MDEU_PAD |
2446 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2447 },
2448 { .type = CRYPTO_ALG_TYPE_AEAD,
2449 .alg.aead = {
2450 .base = {
2451 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2452 .cra_driver_name = "authenc-hmac-sha256-"
2453 "cbc-aes-talitos",
2454 .cra_blocksize = AES_BLOCK_SIZE,
2455 .cra_flags = CRYPTO_ALG_ASYNC |
2456 CRYPTO_ALG_ALLOCATES_MEMORY,
2457 },
2458 .ivsize = AES_BLOCK_SIZE,
2459 .maxauthsize = SHA256_DIGEST_SIZE,
2460 },
2461 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2462 DESC_HDR_SEL0_AESU |
2463 DESC_HDR_MODE0_AESU_CBC |
2464 DESC_HDR_SEL1_MDEUA |
2465 DESC_HDR_MODE1_MDEU_INIT |
2466 DESC_HDR_MODE1_MDEU_PAD |
2467 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2468 },
2469 { .type = CRYPTO_ALG_TYPE_AEAD,
2470 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2471 .alg.aead = {
2472 .base = {
2473 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2474 .cra_driver_name = "authenc-hmac-sha256-"
2475 "cbc-aes-talitos-hsna",
2476 .cra_blocksize = AES_BLOCK_SIZE,
2477 .cra_flags = CRYPTO_ALG_ASYNC |
2478 CRYPTO_ALG_ALLOCATES_MEMORY,
2479 },
2480 .ivsize = AES_BLOCK_SIZE,
2481 .maxauthsize = SHA256_DIGEST_SIZE,
2482 },
2483 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2484 DESC_HDR_SEL0_AESU |
2485 DESC_HDR_MODE0_AESU_CBC |
2486 DESC_HDR_SEL1_MDEUA |
2487 DESC_HDR_MODE1_MDEU_INIT |
2488 DESC_HDR_MODE1_MDEU_PAD |
2489 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2490 },
2491 { .type = CRYPTO_ALG_TYPE_AEAD,
2492 .alg.aead = {
2493 .base = {
2494 .cra_name = "authenc(hmac(sha256),"
2495 "cbc(des3_ede))",
2496 .cra_driver_name = "authenc-hmac-sha256-"
2497 "cbc-3des-talitos",
2498 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2499 .cra_flags = CRYPTO_ALG_ASYNC |
2500 CRYPTO_ALG_ALLOCATES_MEMORY,
2501 },
2502 .ivsize = DES3_EDE_BLOCK_SIZE,
2503 .maxauthsize = SHA256_DIGEST_SIZE,
2504 .setkey = aead_des3_setkey,
2505 },
2506 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2507 DESC_HDR_SEL0_DEU |
2508 DESC_HDR_MODE0_DEU_CBC |
2509 DESC_HDR_MODE0_DEU_3DES |
2510 DESC_HDR_SEL1_MDEUA |
2511 DESC_HDR_MODE1_MDEU_INIT |
2512 DESC_HDR_MODE1_MDEU_PAD |
2513 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2514 },
2515 { .type = CRYPTO_ALG_TYPE_AEAD,
2516 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2517 .alg.aead = {
2518 .base = {
2519 .cra_name = "authenc(hmac(sha256),"
2520 "cbc(des3_ede))",
2521 .cra_driver_name = "authenc-hmac-sha256-"
2522 "cbc-3des-talitos-hsna",
2523 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2524 .cra_flags = CRYPTO_ALG_ASYNC |
2525 CRYPTO_ALG_ALLOCATES_MEMORY,
2526 },
2527 .ivsize = DES3_EDE_BLOCK_SIZE,
2528 .maxauthsize = SHA256_DIGEST_SIZE,
2529 .setkey = aead_des3_setkey,
2530 },
2531 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2532 DESC_HDR_SEL0_DEU |
2533 DESC_HDR_MODE0_DEU_CBC |
2534 DESC_HDR_MODE0_DEU_3DES |
2535 DESC_HDR_SEL1_MDEUA |
2536 DESC_HDR_MODE1_MDEU_INIT |
2537 DESC_HDR_MODE1_MDEU_PAD |
2538 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2539 },
2540 { .type = CRYPTO_ALG_TYPE_AEAD,
2541 .alg.aead = {
2542 .base = {
2543 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2544 .cra_driver_name = "authenc-hmac-sha384-"
2545 "cbc-aes-talitos",
2546 .cra_blocksize = AES_BLOCK_SIZE,
2547 .cra_flags = CRYPTO_ALG_ASYNC |
2548 CRYPTO_ALG_ALLOCATES_MEMORY,
2549 },
2550 .ivsize = AES_BLOCK_SIZE,
2551 .maxauthsize = SHA384_DIGEST_SIZE,
2552 },
2553 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2554 DESC_HDR_SEL0_AESU |
2555 DESC_HDR_MODE0_AESU_CBC |
2556 DESC_HDR_SEL1_MDEUB |
2557 DESC_HDR_MODE1_MDEU_INIT |
2558 DESC_HDR_MODE1_MDEU_PAD |
2559 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2560 },
2561 { .type = CRYPTO_ALG_TYPE_AEAD,
2562 .alg.aead = {
2563 .base = {
2564 .cra_name = "authenc(hmac(sha384),"
2565 "cbc(des3_ede))",
2566 .cra_driver_name = "authenc-hmac-sha384-"
2567 "cbc-3des-talitos",
2568 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2569 .cra_flags = CRYPTO_ALG_ASYNC |
2570 CRYPTO_ALG_ALLOCATES_MEMORY,
2571 },
2572 .ivsize = DES3_EDE_BLOCK_SIZE,
2573 .maxauthsize = SHA384_DIGEST_SIZE,
2574 .setkey = aead_des3_setkey,
2575 },
2576 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2577 DESC_HDR_SEL0_DEU |
2578 DESC_HDR_MODE0_DEU_CBC |
2579 DESC_HDR_MODE0_DEU_3DES |
2580 DESC_HDR_SEL1_MDEUB |
2581 DESC_HDR_MODE1_MDEU_INIT |
2582 DESC_HDR_MODE1_MDEU_PAD |
2583 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2584 },
2585 { .type = CRYPTO_ALG_TYPE_AEAD,
2586 .alg.aead = {
2587 .base = {
2588 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2589 .cra_driver_name = "authenc-hmac-sha512-"
2590 "cbc-aes-talitos",
2591 .cra_blocksize = AES_BLOCK_SIZE,
2592 .cra_flags = CRYPTO_ALG_ASYNC |
2593 CRYPTO_ALG_ALLOCATES_MEMORY,
2594 },
2595 .ivsize = AES_BLOCK_SIZE,
2596 .maxauthsize = SHA512_DIGEST_SIZE,
2597 },
2598 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599 DESC_HDR_SEL0_AESU |
2600 DESC_HDR_MODE0_AESU_CBC |
2601 DESC_HDR_SEL1_MDEUB |
2602 DESC_HDR_MODE1_MDEU_INIT |
2603 DESC_HDR_MODE1_MDEU_PAD |
2604 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2605 },
2606 { .type = CRYPTO_ALG_TYPE_AEAD,
2607 .alg.aead = {
2608 .base = {
2609 .cra_name = "authenc(hmac(sha512),"
2610 "cbc(des3_ede))",
2611 .cra_driver_name = "authenc-hmac-sha512-"
2612 "cbc-3des-talitos",
2613 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2614 .cra_flags = CRYPTO_ALG_ASYNC |
2615 CRYPTO_ALG_ALLOCATES_MEMORY,
2616 },
2617 .ivsize = DES3_EDE_BLOCK_SIZE,
2618 .maxauthsize = SHA512_DIGEST_SIZE,
2619 .setkey = aead_des3_setkey,
2620 },
2621 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2622 DESC_HDR_SEL0_DEU |
2623 DESC_HDR_MODE0_DEU_CBC |
2624 DESC_HDR_MODE0_DEU_3DES |
2625 DESC_HDR_SEL1_MDEUB |
2626 DESC_HDR_MODE1_MDEU_INIT |
2627 DESC_HDR_MODE1_MDEU_PAD |
2628 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2629 },
2630 { .type = CRYPTO_ALG_TYPE_AEAD,
2631 .alg.aead = {
2632 .base = {
2633 .cra_name = "authenc(hmac(md5),cbc(aes))",
2634 .cra_driver_name = "authenc-hmac-md5-"
2635 "cbc-aes-talitos",
2636 .cra_blocksize = AES_BLOCK_SIZE,
2637 .cra_flags = CRYPTO_ALG_ASYNC |
2638 CRYPTO_ALG_ALLOCATES_MEMORY,
2639 },
2640 .ivsize = AES_BLOCK_SIZE,
2641 .maxauthsize = MD5_DIGEST_SIZE,
2642 },
2643 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2644 DESC_HDR_SEL0_AESU |
2645 DESC_HDR_MODE0_AESU_CBC |
2646 DESC_HDR_SEL1_MDEUA |
2647 DESC_HDR_MODE1_MDEU_INIT |
2648 DESC_HDR_MODE1_MDEU_PAD |
2649 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2650 },
2651 { .type = CRYPTO_ALG_TYPE_AEAD,
2652 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2653 .alg.aead = {
2654 .base = {
2655 .cra_name = "authenc(hmac(md5),cbc(aes))",
2656 .cra_driver_name = "authenc-hmac-md5-"
2657 "cbc-aes-talitos-hsna",
2658 .cra_blocksize = AES_BLOCK_SIZE,
2659 .cra_flags = CRYPTO_ALG_ASYNC |
2660 CRYPTO_ALG_ALLOCATES_MEMORY,
2661 },
2662 .ivsize = AES_BLOCK_SIZE,
2663 .maxauthsize = MD5_DIGEST_SIZE,
2664 },
2665 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2666 DESC_HDR_SEL0_AESU |
2667 DESC_HDR_MODE0_AESU_CBC |
2668 DESC_HDR_SEL1_MDEUA |
2669 DESC_HDR_MODE1_MDEU_INIT |
2670 DESC_HDR_MODE1_MDEU_PAD |
2671 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2672 },
2673 { .type = CRYPTO_ALG_TYPE_AEAD,
2674 .alg.aead = {
2675 .base = {
2676 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2677 .cra_driver_name = "authenc-hmac-md5-"
2678 "cbc-3des-talitos",
2679 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2680 .cra_flags = CRYPTO_ALG_ASYNC |
2681 CRYPTO_ALG_ALLOCATES_MEMORY,
2682 },
2683 .ivsize = DES3_EDE_BLOCK_SIZE,
2684 .maxauthsize = MD5_DIGEST_SIZE,
2685 .setkey = aead_des3_setkey,
2686 },
2687 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2688 DESC_HDR_SEL0_DEU |
2689 DESC_HDR_MODE0_DEU_CBC |
2690 DESC_HDR_MODE0_DEU_3DES |
2691 DESC_HDR_SEL1_MDEUA |
2692 DESC_HDR_MODE1_MDEU_INIT |
2693 DESC_HDR_MODE1_MDEU_PAD |
2694 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2695 },
2696 { .type = CRYPTO_ALG_TYPE_AEAD,
2697 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2698 .alg.aead = {
2699 .base = {
2700 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2701 .cra_driver_name = "authenc-hmac-md5-"
2702 "cbc-3des-talitos-hsna",
2703 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2704 .cra_flags = CRYPTO_ALG_ASYNC |
2705 CRYPTO_ALG_ALLOCATES_MEMORY,
2706 },
2707 .ivsize = DES3_EDE_BLOCK_SIZE,
2708 .maxauthsize = MD5_DIGEST_SIZE,
2709 .setkey = aead_des3_setkey,
2710 },
2711 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2712 DESC_HDR_SEL0_DEU |
2713 DESC_HDR_MODE0_DEU_CBC |
2714 DESC_HDR_MODE0_DEU_3DES |
2715 DESC_HDR_SEL1_MDEUA |
2716 DESC_HDR_MODE1_MDEU_INIT |
2717 DESC_HDR_MODE1_MDEU_PAD |
2718 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2719 },
2720 /* SKCIPHER algorithms. */
2721 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2722 .alg.skcipher = {
2723 .base.cra_name = "ecb(aes)",
2724 .base.cra_driver_name = "ecb-aes-talitos",
2725 .base.cra_blocksize = AES_BLOCK_SIZE,
2726 .base.cra_flags = CRYPTO_ALG_ASYNC |
2727 CRYPTO_ALG_ALLOCATES_MEMORY,
2728 .min_keysize = AES_MIN_KEY_SIZE,
2729 .max_keysize = AES_MAX_KEY_SIZE,
2730 .setkey = skcipher_aes_setkey,
2731 },
2732 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2733 DESC_HDR_SEL0_AESU,
2734 },
2735 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2736 .alg.skcipher = {
2737 .base.cra_name = "cbc(aes)",
2738 .base.cra_driver_name = "cbc-aes-talitos",
2739 .base.cra_blocksize = AES_BLOCK_SIZE,
2740 .base.cra_flags = CRYPTO_ALG_ASYNC |
2741 CRYPTO_ALG_ALLOCATES_MEMORY,
2742 .min_keysize = AES_MIN_KEY_SIZE,
2743 .max_keysize = AES_MAX_KEY_SIZE,
2744 .ivsize = AES_BLOCK_SIZE,
2745 .setkey = skcipher_aes_setkey,
2746 },
2747 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2748 DESC_HDR_SEL0_AESU |
2749 DESC_HDR_MODE0_AESU_CBC,
2750 },
2751 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2752 .alg.skcipher = {
2753 .base.cra_name = "ctr(aes)",
2754 .base.cra_driver_name = "ctr-aes-talitos",
2755 .base.cra_blocksize = 1,
2756 .base.cra_flags = CRYPTO_ALG_ASYNC |
2757 CRYPTO_ALG_ALLOCATES_MEMORY,
2758 .min_keysize = AES_MIN_KEY_SIZE,
2759 .max_keysize = AES_MAX_KEY_SIZE,
2760 .ivsize = AES_BLOCK_SIZE,
2761 .setkey = skcipher_aes_setkey,
2762 },
2763 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2764 DESC_HDR_SEL0_AESU |
2765 DESC_HDR_MODE0_AESU_CTR,
2766 },
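/*
 * Alternate ctr(aes) entry using the plain non-snooping descriptor
 * type; talitos_alg_alloc() and hw_supports() between them ensure
 * only the variant the hardware can run is registered.
 */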
2767 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2768 .alg.skcipher = {
2769 .base.cra_name = "ctr(aes)",
2770 .base.cra_driver_name = "ctr-aes-talitos",
2771 .base.cra_blocksize = 1,
2772 .base.cra_flags = CRYPTO_ALG_ASYNC |
2773 CRYPTO_ALG_ALLOCATES_MEMORY,
2774 .min_keysize = AES_MIN_KEY_SIZE,
2775 .max_keysize = AES_MAX_KEY_SIZE,
2776 .ivsize = AES_BLOCK_SIZE,
2777 .setkey = skcipher_aes_setkey,
2778 },
2779 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2780 DESC_HDR_SEL0_AESU |
2781 DESC_HDR_MODE0_AESU_CTR,
2782 },
2783 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2784 .alg.skcipher = {
2785 .base.cra_name = "ecb(des)",
2786 .base.cra_driver_name = "ecb-des-talitos",
2787 .base.cra_blocksize = DES_BLOCK_SIZE,
2788 .base.cra_flags = CRYPTO_ALG_ASYNC |
2789 CRYPTO_ALG_ALLOCATES_MEMORY,
2790 .min_keysize = DES_KEY_SIZE,
2791 .max_keysize = DES_KEY_SIZE,
2792 .setkey = skcipher_des_setkey,
2793 },
2794 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2795 DESC_HDR_SEL0_DEU,
2796 },
2797 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2798 .alg.skcipher = {
2799 .base.cra_name = "cbc(des)",
2800 .base.cra_driver_name = "cbc-des-talitos",
2801 .base.cra_blocksize = DES_BLOCK_SIZE,
2802 .base.cra_flags = CRYPTO_ALG_ASYNC |
2803 CRYPTO_ALG_ALLOCATES_MEMORY,
2804 .min_keysize = DES_KEY_SIZE,
2805 .max_keysize = DES_KEY_SIZE,
2806 .ivsize = DES_BLOCK_SIZE,
2807 .setkey = skcipher_des_setkey,
2808 },
2809 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 DESC_HDR_SEL0_DEU |
2811 DESC_HDR_MODE0_DEU_CBC,
2812 },
2813 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2814 .alg.skcipher = {
2815 .base.cra_name = "ecb(des3_ede)",
2816 .base.cra_driver_name = "ecb-3des-talitos",
2817 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2818 .base.cra_flags = CRYPTO_ALG_ASYNC |
2819 CRYPTO_ALG_ALLOCATES_MEMORY,
2820 .min_keysize = DES3_EDE_KEY_SIZE,
2821 .max_keysize = DES3_EDE_KEY_SIZE,
2822 .setkey = skcipher_des3_setkey,
2823 },
2824 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2825 DESC_HDR_SEL0_DEU |
2826 DESC_HDR_MODE0_DEU_3DES,
2827 },
2828 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2829 .alg.skcipher = {
2830 .base.cra_name = "cbc(des3_ede)",
2831 .base.cra_driver_name = "cbc-3des-talitos",
2832 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2833 .base.cra_flags = CRYPTO_ALG_ASYNC |
2834 CRYPTO_ALG_ALLOCATES_MEMORY,
2835 .min_keysize = DES3_EDE_KEY_SIZE,
2836 .max_keysize = DES3_EDE_KEY_SIZE,
2837 .ivsize = DES3_EDE_BLOCK_SIZE,
2838 .setkey = skcipher_des3_setkey,
2839 },
2840 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2841 DESC_HDR_SEL0_DEU |
2842 DESC_HDR_MODE0_DEU_CBC |
2843 DESC_HDR_MODE0_DEU_3DES,
2844 },
2845 /* AHASH algorithms. */
2846 { .type = CRYPTO_ALG_TYPE_AHASH,
2847 .alg.hash = {
2848 .halg.digestsize = MD5_DIGEST_SIZE,
2849 .halg.statesize = sizeof(struct talitos_export_state),
2850 .halg.base = {
2851 .cra_name = "md5",
2852 .cra_driver_name = "md5-talitos",
2853 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2854 .cra_flags = CRYPTO_ALG_ASYNC |
2855 CRYPTO_ALG_ALLOCATES_MEMORY,
2856 }
2857 },
2858 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2859 DESC_HDR_SEL0_MDEUA |
2860 DESC_HDR_MODE0_MDEU_MD5,
2861 },
2862 { .type = CRYPTO_ALG_TYPE_AHASH,
2863 .alg.hash = {
2864 .halg.digestsize = SHA1_DIGEST_SIZE,
2865 .halg.statesize = sizeof(struct talitos_export_state),
2866 .halg.base = {
2867 .cra_name = "sha1",
2868 .cra_driver_name = "sha1-talitos",
2869 .cra_blocksize = SHA1_BLOCK_SIZE,
2870 .cra_flags = CRYPTO_ALG_ASYNC |
2871 CRYPTO_ALG_ALLOCATES_MEMORY,
2872 }
2873 },
2874 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2875 DESC_HDR_SEL0_MDEUA |
2876 DESC_HDR_MODE0_MDEU_SHA1,
2877 },
2878 { .type = CRYPTO_ALG_TYPE_AHASH,
2879 .alg.hash = {
2880 .halg.digestsize = SHA224_DIGEST_SIZE,
2881 .halg.statesize = sizeof(struct talitos_export_state),
2882 .halg.base = {
2883 .cra_name = "sha224",
2884 .cra_driver_name = "sha224-talitos",
2885 .cra_blocksize = SHA224_BLOCK_SIZE,
2886 .cra_flags = CRYPTO_ALG_ASYNC |
2887 CRYPTO_ALG_ALLOCATES_MEMORY,
2888 }
2889 },
2890 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2891 DESC_HDR_SEL0_MDEUA |
2892 DESC_HDR_MODE0_MDEU_SHA224,
2893 },
2894 { .type = CRYPTO_ALG_TYPE_AHASH,
2895 .alg.hash = {
2896 .halg.digestsize = SHA256_DIGEST_SIZE,
2897 .halg.statesize = sizeof(struct talitos_export_state),
2898 .halg.base = {
2899 .cra_name = "sha256",
2900 .cra_driver_name = "sha256-talitos",
2901 .cra_blocksize = SHA256_BLOCK_SIZE,
2902 .cra_flags = CRYPTO_ALG_ASYNC |
2903 CRYPTO_ALG_ALLOCATES_MEMORY,
2904 }
2905 },
2906 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2907 DESC_HDR_SEL0_MDEUA |
2908 DESC_HDR_MODE0_MDEU_SHA256,
2909 },
2910 { .type = CRYPTO_ALG_TYPE_AHASH,
2911 .alg.hash = {
2912 .halg.digestsize = SHA384_DIGEST_SIZE,
2913 .halg.statesize = sizeof(struct talitos_export_state),
2914 .halg.base = {
2915 .cra_name = "sha384",
2916 .cra_driver_name = "sha384-talitos",
2917 .cra_blocksize = SHA384_BLOCK_SIZE,
2918 .cra_flags = CRYPTO_ALG_ASYNC |
2919 CRYPTO_ALG_ALLOCATES_MEMORY,
2920 }
2921 },
2922 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2923 DESC_HDR_SEL0_MDEUB |
2924 DESC_HDR_MODE0_MDEUB_SHA384,
2925 },
2926 { .type = CRYPTO_ALG_TYPE_AHASH,
2927 .alg.hash = {
2928 .halg.digestsize = SHA512_DIGEST_SIZE,
2929 .halg.statesize = sizeof(struct talitos_export_state),
2930 .halg.base = {
2931 .cra_name = "sha512",
2932 .cra_driver_name = "sha512-talitos",
2933 .cra_blocksize = SHA512_BLOCK_SIZE,
2934 .cra_flags = CRYPTO_ALG_ASYNC |
2935 CRYPTO_ALG_ALLOCATES_MEMORY,
2936 }
2937 },
2938 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2939 DESC_HDR_SEL0_MDEUB |
2940 DESC_HDR_MODE0_MDEUB_SHA512,
2941 },
2942 { .type = CRYPTO_ALG_TYPE_AHASH,
2943 .alg.hash = {
2944 .halg.digestsize = MD5_DIGEST_SIZE,
2945 .halg.statesize = sizeof(struct talitos_export_state),
2946 .halg.base = {
2947 .cra_name = "hmac(md5)",
2948 .cra_driver_name = "hmac-md5-talitos",
2949 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2950 .cra_flags = CRYPTO_ALG_ASYNC |
2951 CRYPTO_ALG_ALLOCATES_MEMORY,
2952 }
2953 },
2954 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2955 DESC_HDR_SEL0_MDEUA |
2956 DESC_HDR_MODE0_MDEU_MD5,
2957 },
2958 { .type = CRYPTO_ALG_TYPE_AHASH,
2959 .alg.hash = {
2960 .halg.digestsize = SHA1_DIGEST_SIZE,
2961 .halg.statesize = sizeof(struct talitos_export_state),
2962 .halg.base = {
2963 .cra_name = "hmac(sha1)",
2964 .cra_driver_name = "hmac-sha1-talitos",
2965 .cra_blocksize = SHA1_BLOCK_SIZE,
2966 .cra_flags = CRYPTO_ALG_ASYNC |
2967 CRYPTO_ALG_ALLOCATES_MEMORY,
2968 }
2969 },
2970 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971 DESC_HDR_SEL0_MDEUA |
2972 DESC_HDR_MODE0_MDEU_SHA1,
2973 },
2974 { .type = CRYPTO_ALG_TYPE_AHASH,
2975 .alg.hash = {
2976 .halg.digestsize = SHA224_DIGEST_SIZE,
2977 .halg.statesize = sizeof(struct talitos_export_state),
2978 .halg.base = {
2979 .cra_name = "hmac(sha224)",
2980 .cra_driver_name = "hmac-sha224-talitos",
2981 .cra_blocksize = SHA224_BLOCK_SIZE,
2982 .cra_flags = CRYPTO_ALG_ASYNC |
2983 CRYPTO_ALG_ALLOCATES_MEMORY,
2984 }
2985 },
2986 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2987 DESC_HDR_SEL0_MDEUA |
2988 DESC_HDR_MODE0_MDEU_SHA224,
2989 },
2990 { .type = CRYPTO_ALG_TYPE_AHASH,
2991 .alg.hash = {
2992 .halg.digestsize = SHA256_DIGEST_SIZE,
2993 .halg.statesize = sizeof(struct talitos_export_state),
2994 .halg.base = {
2995 .cra_name = "hmac(sha256)",
2996 .cra_driver_name = "hmac-sha256-talitos",
2997 .cra_blocksize = SHA256_BLOCK_SIZE,
2998 .cra_flags = CRYPTO_ALG_ASYNC |
2999 CRYPTO_ALG_ALLOCATES_MEMORY,
3000 }
3001 },
3002 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3003 DESC_HDR_SEL0_MDEUA |
3004 DESC_HDR_MODE0_MDEU_SHA256,
3005 },
3006 { .type = CRYPTO_ALG_TYPE_AHASH,
3007 .alg.hash = {
3008 .halg.digestsize = SHA384_DIGEST_SIZE,
3009 .halg.statesize = sizeof(struct talitos_export_state),
3010 .halg.base = {
3011 .cra_name = "hmac(sha384)",
3012 .cra_driver_name = "hmac-sha384-talitos",
3013 .cra_blocksize = SHA384_BLOCK_SIZE,
3014 .cra_flags = CRYPTO_ALG_ASYNC |
3015 CRYPTO_ALG_ALLOCATES_MEMORY,
3016 }
3017 },
3018 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3019 DESC_HDR_SEL0_MDEUB |
3020 DESC_HDR_MODE0_MDEUB_SHA384,
3021 },
3022 { .type = CRYPTO_ALG_TYPE_AHASH,
3023 .alg.hash = {
3024 .halg.digestsize = SHA512_DIGEST_SIZE,
3025 .halg.statesize = sizeof(struct talitos_export_state),
3026 .halg.base = {
3027 .cra_name = "hmac(sha512)",
3028 .cra_driver_name = "hmac-sha512-talitos",
3029 .cra_blocksize = SHA512_BLOCK_SIZE,
3030 .cra_flags = CRYPTO_ALG_ASYNC |
3031 CRYPTO_ALG_ALLOCATES_MEMORY,
3032 }
3033 },
3034 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3035 DESC_HDR_SEL0_MDEUB |
3036 DESC_HDR_MODE0_MDEUB_SHA512,
3037 }
3038 };
3039
3040 struct talitos_crypto_alg {
3041 struct list_head entry;
3042 struct device *dev;
3043 struct talitos_alg_template algt;
3044 };
3045
3046 static int talitos_init_common(struct talitos_ctx *ctx,
3047 struct talitos_crypto_alg *talitos_alg)
3048 {
3049 struct talitos_private *priv;
3050
3051 /* update context with ptr to dev */
3052 ctx->dev = talitos_alg->dev;
3053
3054 /* assign SEC channel to tfm in round-robin fashion */
3055 priv = dev_get_drvdata(ctx->dev);
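/* num_channels is validated as a power of two at probe time, so this
 * mask is an exact modulo */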
3056 ctx->ch = atomic_inc_return(&priv->last_chan) &
3057 (priv->num_channels - 1);
3058
3059 /* copy descriptor header template value */
3060 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3061
3062 /* select done notification */
3063 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3064
3065 return 0;
3066 }
3067
3068 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3069 {
3070 struct aead_alg *alg = crypto_aead_alg(tfm);
3071 struct talitos_crypto_alg *talitos_alg;
3072 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3073
3074 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3075 algt.alg.aead);
3076
3077 return talitos_init_common(ctx, talitos_alg);
3078 }
3079
3080 static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3081 {
3082 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3083 struct talitos_crypto_alg *talitos_alg;
3084 struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3085
3086 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3087 algt.alg.skcipher);
3088
3089 return talitos_init_common(ctx, talitos_alg);
3090 }
3091
3092 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3093 {
3094 struct crypto_alg *alg = tfm->__crt_alg;
3095 struct talitos_crypto_alg *talitos_alg;
3096 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3097
3098 talitos_alg = container_of(__crypto_ahash_alg(alg),
3099 struct talitos_crypto_alg,
3100 algt.alg.hash);
3101
3102 ctx->keylen = 0;
3103 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3104 sizeof(struct talitos_ahash_req_ctx));
3105
3106 return talitos_init_common(ctx, talitos_alg);
3107 }
3108
3109 static void talitos_cra_exit(struct crypto_tfm *tfm)
3110 {
3111 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3112 struct device *dev = ctx->dev;
3113
3114 if (ctx->keylen)
3115 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3116 }
3117
3118 /*
3119 * given the alg's descriptor header template, determine whether descriptor
3120 * type and primary/secondary execution units required match the hw
3121 * capabilities description provided in the device tree node.
3122 */
3123 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3124 {
3125 struct talitos_private *priv = dev_get_drvdata(dev);
3126 int ret;
3127
3128 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3129 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3130
3131 if (SECONDARY_EU(desc_hdr_template))
3132 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3133 & priv->exec_units);
3134
3135 return ret;
3136 }
3137
3138 static int talitos_remove(struct platform_device *ofdev)
3139 {
3140 struct device *dev = &ofdev->dev;
3141 struct talitos_private *priv = dev_get_drvdata(dev);
3142 struct talitos_crypto_alg *t_alg, *n;
3143 int i;
3144
3145 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3146 switch (t_alg->algt.type) {
3147 case CRYPTO_ALG_TYPE_SKCIPHER:
3148 crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3149 break;
3150 case CRYPTO_ALG_TYPE_AEAD:
3151 crypto_unregister_aead(&t_alg->algt.alg.aead);
3152 break;
3153 case CRYPTO_ALG_TYPE_AHASH:
3154 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3155 break;
3156 }
3157 list_del(&t_alg->entry);
3158 }
3159
3160 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3161 talitos_unregister_rng(dev);
3162
3163 for (i = 0; i < 2; i++)
3164 if (priv->irq[i]) {
3165 free_irq(priv->irq[i], dev);
3166 irq_dispose_mapping(priv->irq[i]);
3167 }
3168
3169 tasklet_kill(&priv->done_task[0]);
3170 if (priv->irq[1])
3171 tasklet_kill(&priv->done_task[1]);
3172
3173 return 0;
3174 }
3175
3176 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3177 struct talitos_alg_template
3178 *template)
3179 {
3180 struct talitos_private *priv = dev_get_drvdata(dev);
3181 struct talitos_crypto_alg *t_alg;
3182 struct crypto_alg *alg;
3183
3184 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3185 GFP_KERNEL);
3186 if (!t_alg)
3187 return ERR_PTR(-ENOMEM);
3188
3189 t_alg->algt = *template;
3190
3191 switch (t_alg->algt.type) {
3192 case CRYPTO_ALG_TYPE_SKCIPHER:
3193 alg = &t_alg->algt.alg.skcipher.base;
3194 alg->cra_exit = talitos_cra_exit;
3195 t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3196 t_alg->algt.alg.skcipher.setkey =
3197 t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3198 t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3199 t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3200 if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3201 DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3202 DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3203 devm_kfree(dev, t_alg);
3204 return ERR_PTR(-ENOTSUPP);
3205 }
3206 break;
3207 case CRYPTO_ALG_TYPE_AEAD:
3208 alg = &t_alg->algt.alg.aead.base;
3209 alg->cra_exit = talitos_cra_exit;
3210 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3211 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3212 aead_setkey;
3213 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3214 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3215 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3217 devm_kfree(dev, t_alg);
3218 return ERR_PTR(-ENOTSUPP);
3219 }
3220 break;
3221 case CRYPTO_ALG_TYPE_AHASH:
3222 alg = &t_alg->algt.alg.hash.halg.base;
3223 alg->cra_init = talitos_cra_init_ahash;
3224 alg->cra_exit = talitos_cra_exit;
3225 t_alg->algt.alg.hash.init = ahash_init;
3226 t_alg->algt.alg.hash.update = ahash_update;
3227 t_alg->algt.alg.hash.final = ahash_final;
3228 t_alg->algt.alg.hash.finup = ahash_finup;
3229 t_alg->algt.alg.hash.digest = ahash_digest;
3230 if (!strncmp(alg->cra_name, "hmac", 4))
3231 t_alg->algt.alg.hash.setkey = ahash_setkey;
3232 t_alg->algt.alg.hash.import = ahash_import;
3233 t_alg->algt.alg.hash.export = ahash_export;
3234
3235 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3236 !strncmp(alg->cra_name, "hmac", 4)) {
3237 devm_kfree(dev, t_alg);
3238 return ERR_PTR(-ENOTSUPP);
3239 }
3240 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241 (!strcmp(alg->cra_name, "sha224") ||
3242 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3243 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3244 t_alg->algt.desc_hdr_template =
3245 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3246 DESC_HDR_SEL0_MDEUA |
3247 DESC_HDR_MODE0_MDEU_SHA256;
3248 }
3249 break;
3250 default:
3251 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3252 devm_kfree(dev, t_alg);
3253 return ERR_PTR(-EINVAL);
3254 }
3255
3256 alg->cra_module = THIS_MODULE;
3257 if (t_alg->algt.priority)
3258 alg->cra_priority = t_alg->algt.priority;
3259 else
3260 alg->cra_priority = TALITOS_CRA_PRIORITY;
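/* an alignmask of 3 has the crypto API provide 4-byte-aligned buffers,
 * which SEC1 requires; later SECs have no such restriction */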
3261 if (has_ftr_sec1(priv))
3262 alg->cra_alignmask = 3;
3263 else
3264 alg->cra_alignmask = 0;
3265 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3266 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3267
3268 t_alg->dev = dev;
3269
3270 return t_alg;
3271 }
3272
3273 static int talitos_probe_irq(struct platform_device *ofdev)
3274 {
3275 struct device *dev = &ofdev->dev;
3276 struct device_node *np = ofdev->dev.of_node;
3277 struct talitos_private *priv = dev_get_drvdata(dev);
3278 int err;
3279 bool is_sec1 = has_ftr_sec1(priv);
3280
3281 priv->irq[0] = irq_of_parse_and_map(np, 0);
3282 if (!priv->irq[0]) {
3283 dev_err(dev, "failed to map irq\n");
3284 return -EINVAL;
3285 }
3286 if (is_sec1) {
3287 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3288 dev_driver_string(dev), dev);
3289 goto primary_out;
3290 }
3291
3292 priv->irq[1] = irq_of_parse_and_map(np, 1);
3293
3294 /* get the primary irq line */
3295 if (!priv->irq[1]) {
3296 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3297 dev_driver_string(dev), dev);
3298 goto primary_out;
3299 }
3300
3301 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3302 dev_driver_string(dev), dev);
3303 if (err)
3304 goto primary_out;
3305
3306 /* get the secondary irq line */
3307 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3308 dev_driver_string(dev), dev);
3309 if (err) {
3310 dev_err(dev, "failed to request secondary irq\n");
3311 irq_dispose_mapping(priv->irq[1]);
3312 priv->irq[1] = 0;
3313 }
3314
3315 return err;
3316
3317 primary_out:
3318 if (err) {
3319 dev_err(dev, "failed to request primary irq\n");
3320 irq_dispose_mapping(priv->irq[0]);
3321 priv->irq[0] = 0;
3322 }
3323
3324 return err;
3325 }
3326
3327 static int talitos_probe(struct platform_device *ofdev)
3328 {
3329 struct device *dev = &ofdev->dev;
3330 struct device_node *np = ofdev->dev.of_node;
3331 struct talitos_private *priv;
3332 int i, err;
3333 int stride;
3334 struct resource *res;
3335
3336 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3337 if (!priv)
3338 return -ENOMEM;
3339
3340 INIT_LIST_HEAD(&priv->alg_list);
3341
3342 dev_set_drvdata(dev, priv);
3343
3344 priv->ofdev = ofdev;
3345
3346 spin_lock_init(&priv->reg_lock);
3347
3348 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3349 if (!res)
3350 return -ENXIO;
3351 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3352 if (!priv->reg) {
3353 dev_err(dev, "failed to ioremap\n");
3354 err = -ENOMEM;
3355 goto err_out;
3356 }
3357
3358 /* get SEC version capabilities from device tree */
3359 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3360 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3361 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3362 of_property_read_u32(np, "fsl,descriptor-types-mask",
3363 &priv->desc_types);
3364
3365 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3366 !priv->exec_units || !priv->desc_types) {
3367 dev_err(dev, "invalid property data in device tree node\n");
3368 err = -EINVAL;
3369 goto err_out;
3370 }
3371
3372 if (of_device_is_compatible(np, "fsl,sec3.0"))
3373 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3374
3375 if (of_device_is_compatible(np, "fsl,sec2.1"))
3376 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3377 TALITOS_FTR_SHA224_HWINIT |
3378 TALITOS_FTR_HMAC_OK;
3379
3380 if (of_device_is_compatible(np, "fsl,sec1.0"))
3381 priv->features |= TALITOS_FTR_SEC1;
3382
3383 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3384 priv->reg_deu = priv->reg + TALITOS12_DEU;
3385 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3386 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3387 stride = TALITOS1_CH_STRIDE;
3388 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3389 priv->reg_deu = priv->reg + TALITOS10_DEU;
3390 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3391 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3392 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3393 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3394 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3395 stride = TALITOS1_CH_STRIDE;
3396 } else {
3397 priv->reg_deu = priv->reg + TALITOS2_DEU;
3398 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3399 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3400 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3401 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3402 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3403 priv->reg_keu = priv->reg + TALITOS2_KEU;
3404 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3405 stride = TALITOS2_CH_STRIDE;
3406 }

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (has_ftr_sec1(priv)) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}
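
	/*
	 * Completion processing runs in tasklet context: SEC1 uses a single
	 * done tasklet, while SEC2+ with a second interrupt line splits
	 * completion handling between channels 0/2 and 1/3, matching the
	 * talitos2_interrupt_ch1_3 handler requested in talitos_probe_irq().
	 */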

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

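		/*
		 * Bias the submit counter negative so the submit path can
		 * detect a full channel fifo with a simple
		 * atomic_inc_return() going positive, keeping one hardware
		 * slot in reserve.
		 */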
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

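	/*
	 * SEC descriptor pointers carry 36-bit physical addresses (the
	 * extended pointer bits on SEC2+), so request a 36-bit DMA mask.
	 */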
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err)
		goto err_out;

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		}
		dev_info(dev, "hwrng\n");
	}

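	/*
	 * Each entry in driver_algs carries a descriptor header template;
	 * hw_supports() compares it against the exec-units and
	 * descriptor-types masks read earlier, so templates this instance
	 * cannot execute are skipped rather than registered.
	 */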
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_SKCIPHER:
				err = crypto_register_skcipher(
						&t_alg->algt.alg.skcipher);
				alg = &t_alg->algt.alg.skcipher.base;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
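
/*
 * Only the two baseline compatibles are matched here; Freescale device
 * trees conventionally list newer SEC revisions alongside an older
 * fallback (e.g. "fsl,sec3.0", ..., "fsl,sec2.0"), which is why
 * talitos_probe() can still key finer-grained features off the more
 * specific strings.
 */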

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");