1 /*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
60 {
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
64 }
65
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72 }
73
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75 bool is_sec1)
76 {
77 if (is_sec1) {
78 ptr->res = 0;
79 ptr->len1 = cpu_to_be16(len);
80 } else {
81 ptr->len = cpu_to_be16(len);
82 }
83 }
84
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 bool is_sec1)
87 {
88 if (is_sec1)
89 return be16_to_cpu(ptr->len1);
90 else
91 return be16_to_cpu(ptr->len);
92 }
93
94 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
95 bool is_sec1)
96 {
97 if (!is_sec1)
98 ptr->j_extent = val;
99 }
100
101 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
102 {
103 if (!is_sec1)
104 ptr->j_extent |= val;
105 }
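
/*
 * Editorial example (a sketch, not driver code): on SEC2+ a 36-bit bus
 * address is split between the 32-bit pointer word and the extended
 * pointer byte, so for a hypothetical dma_addr of 0x3_4000_1000:
 *
 *	struct talitos_ptr p;
 *
 *	to_talitos_ptr(&p, 0x340001000ULL, false);
 *	... p.ptr == cpu_to_be32(0x40001000), p.eptr == 0x3 ...
 *	to_talitos_ptr_len(&p, 16, false);
 *	... p.len == cpu_to_be16(16) ...
 *
 * On SEC1 (is_sec1 == true) there is no eptr, and the length lives in
 * len1 with res cleared, as the helpers above show.
 */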
106
107 /*
108 * map virtual single (contiguous) pointer to h/w descriptor pointer
109 */
110 static void map_single_talitos_ptr(struct device *dev,
111 struct talitos_ptr *ptr,
112 unsigned int len, void *data,
113 enum dma_data_direction dir)
114 {
115 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
118
119 to_talitos_ptr_len(ptr, len, is_sec1);
120 to_talitos_ptr(ptr, dma_addr, is_sec1);
121 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
122 }
123
124 /*
125 * unmap bus single (contiguous) h/w descriptor pointer
126 */
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
130 {
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
133
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
136 }
137
138 static int reset_channel(struct device *dev, int ch)
139 {
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
143
144 if (is_sec1) {
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
147
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
150 cpu_relax();
151 } else {
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
154
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
157 cpu_relax();
158 }
159
160 if (timeout == 0) {
161 dev_err(dev, "failed to reset channel %d\n", ch);
162 return -EIO;
163 }
164
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168
169 /* and ICCR writeback, if available */
170 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
171 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
172 TALITOS_CCCR_LO_IWSE);
173
174 return 0;
175 }
176
177 static int reset_device(struct device *dev)
178 {
179 struct talitos_private *priv = dev_get_drvdata(dev);
180 unsigned int timeout = TALITOS_TIMEOUT;
181 bool is_sec1 = has_ftr_sec1(priv);
182 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
183
184 setbits32(priv->reg + TALITOS_MCR, mcr);
185
186 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
187 && --timeout)
188 cpu_relax();
189
190 if (priv->irq[1]) {
191 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
192 setbits32(priv->reg + TALITOS_MCR, mcr);
193 }
194
195 if (timeout == 0) {
196 dev_err(dev, "failed to reset device\n");
197 return -EIO;
198 }
199
200 return 0;
201 }
202
203 /*
204 * Reset and initialize the device
205 */
206 static int init_device(struct device *dev)
207 {
208 struct talitos_private *priv = dev_get_drvdata(dev);
209 int ch, err;
210 bool is_sec1 = has_ftr_sec1(priv);
211
212 /*
213 * Master reset
214 * errata documentation: warning: certain SEC interrupts
215 * are not fully cleared by writing the MCR:SWR bit;
216 * set the bit twice to completely reset
217 */
218 err = reset_device(dev);
219 if (err)
220 return err;
221
222 err = reset_device(dev);
223 if (err)
224 return err;
225
226 /* reset channels */
227 for (ch = 0; ch < priv->num_channels; ch++) {
228 err = reset_channel(dev, ch);
229 if (err)
230 return err;
231 }
232
233 /* enable channel done and error interrupts */
234 if (is_sec1) {
235 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
236 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
237 /* disable parity error check in DEU (erroneous? test vect.) */
238 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
239 } else {
240 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
241 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
242 }
243
244 /* disable integrity check error interrupts (use writeback instead) */
245 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
246 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
247 TALITOS_MDEUICR_LO_ICE);
248
249 return 0;
250 }
251
252 /**
253 * talitos_submit - submits a descriptor to the device for processing
254 * @dev: the SEC device to be used
255 * @ch: the SEC device channel to be used
256 * @desc: the descriptor to be processed by the device
257 * @callback: whom to call when processing is complete
258 * @context: a handle for use by caller (optional)
259 *
260 * desc must contain valid dma-mapped (bus physical) address pointers.
261 * callback must check err and feedback in descriptor header
262 * for device processing status.
263 */
264 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
265 void (*callback)(struct device *dev,
266 struct talitos_desc *desc,
267 void *context, int error),
268 void *context)
269 {
270 struct talitos_private *priv = dev_get_drvdata(dev);
271 struct talitos_request *request;
272 unsigned long flags;
273 int head;
274 bool is_sec1 = has_ftr_sec1(priv);
275
276 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
277
278 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
279 /* h/w fifo is full */
280 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
281 return -EAGAIN;
282 }
283
284 head = priv->chan[ch].head;
285 request = &priv->chan[ch].fifo[head];
286
287 /* map descriptor and save caller data */
288 if (is_sec1) {
289 desc->hdr1 = desc->hdr;
290 desc->next_desc = 0;
291 request->dma_desc = dma_map_single(dev, &desc->hdr1,
292 TALITOS_DESC_SIZE,
293 DMA_BIDIRECTIONAL);
294 } else {
295 request->dma_desc = dma_map_single(dev, desc,
296 TALITOS_DESC_SIZE,
297 DMA_BIDIRECTIONAL);
298 }
299 request->callback = callback;
300 request->context = context;
301
302 /* increment fifo head */
303 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
304
305 smp_wmb();
306 request->desc = desc;
307
308 /* GO! */
309 wmb();
310 out_be32(priv->chan[ch].reg + TALITOS_FF,
311 upper_32_bits(request->dma_desc));
312 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
313 lower_32_bits(request->dma_desc));
314
315 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
316
317 return -EINPROGRESS;
318 }
319 EXPORT_SYMBOL(talitos_submit);
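
/*
 * Minimal usage sketch for talitos_submit() (hypothetical caller and
 * callback names; the real users are the request-build paths below):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		... check the DONE/error bits in desc->hdr, then complete ...
 *	}
 *
 *	ret = talitos_submit(dev, ch, desc, my_done, my_context);
 *	if (ret != -EINPROGRESS) {
 *		... not queued (-EAGAIN when the channel fifo is full);
 *		    unmap and free the descriptor here ...
 *	}
 *
 * Note the asynchronous contract: -EINPROGRESS is the success return.
 */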
320
321 /*
322 * process what was done, notify callback of error if not
323 */
324 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
325 {
326 struct talitos_private *priv = dev_get_drvdata(dev);
327 struct talitos_request *request, saved_req;
328 unsigned long flags;
329 int tail, status;
330 bool is_sec1 = has_ftr_sec1(priv);
331
332 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
333
334 tail = priv->chan[ch].tail;
335 while (priv->chan[ch].fifo[tail].desc) {
336 __be32 hdr;
337
338 request = &priv->chan[ch].fifo[tail];
339
340 /* descriptors with their done bits set don't get the error */
341 rmb();
342 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
343
344 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
345 status = 0;
346 else
347 if (!error)
348 break;
349 else
350 status = error;
351
352 dma_unmap_single(dev, request->dma_desc,
353 TALITOS_DESC_SIZE,
354 DMA_BIDIRECTIONAL);
355
356 /* copy entries so we can call callback outside lock */
357 saved_req.desc = request->desc;
358 saved_req.callback = request->callback;
359 saved_req.context = request->context;
360
361 /* release request entry in fifo */
362 smp_wmb();
363 request->desc = NULL;
364
365 /* increment fifo tail */
366 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
367
368 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
369
370 atomic_dec(&priv->chan[ch].submit_count);
371
372 saved_req.callback(dev, saved_req.desc, saved_req.context,
373 status);
374 /* channel may resume processing in single desc error case */
375 if (error && !reset_ch && status == error)
376 return;
377 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
378 tail = priv->chan[ch].tail;
379 }
380
381 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
382 }
383
384 /*
385 * process completed requests for channels that have done status
386 */
387 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
388 static void talitos1_done_##name(unsigned long data) \
389 { \
390 struct device *dev = (struct device *)data; \
391 struct talitos_private *priv = dev_get_drvdata(dev); \
392 unsigned long flags; \
393 \
394 if (ch_done_mask & 0x10000000) \
395 flush_channel(dev, 0, 0, 0); \
396 if (priv->num_channels == 1) \
397 goto out; \
398 if (ch_done_mask & 0x40000000) \
399 flush_channel(dev, 1, 0, 0); \
400 if (ch_done_mask & 0x00010000) \
401 flush_channel(dev, 2, 0, 0); \
402 if (ch_done_mask & 0x00040000) \
403 flush_channel(dev, 3, 0, 0); \
404 \
405 out: \
406 /* At this point, all completed channels have been processed */ \
407 /* Unmask done interrupts for channels completed later on. */ \
408 spin_lock_irqsave(&priv->reg_lock, flags); \
409 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
410 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
411 spin_unlock_irqrestore(&priv->reg_lock, flags); \
412 }
413
414 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415
416 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
417 static void talitos2_done_##name(unsigned long data) \
418 { \
419 struct device *dev = (struct device *)data; \
420 struct talitos_private *priv = dev_get_drvdata(dev); \
421 unsigned long flags; \
422 \
423 if (ch_done_mask & 1) \
424 flush_channel(dev, 0, 0, 0); \
425 if (priv->num_channels == 1) \
426 goto out; \
427 if (ch_done_mask & (1 << 2)) \
428 flush_channel(dev, 1, 0, 0); \
429 if (ch_done_mask & (1 << 4)) \
430 flush_channel(dev, 2, 0, 0); \
431 if (ch_done_mask & (1 << 6)) \
432 flush_channel(dev, 3, 0, 0); \
433 \
434 out: \
435 /* At this point, all completed channels have been processed */ \
436 /* Unmask done interrupts for channels completed later on. */ \
437 spin_lock_irqsave(&priv->reg_lock, flags); \
438 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
439 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
440 spin_unlock_irqrestore(&priv->reg_lock, flags); \
441 }
442
443 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
444 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
445 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
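
/*
 * For orientation, DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
 * above expands to a tasklet roughly like (sketch, boilerplate elided):
 *
 *	static void talitos2_done_ch0_2(unsigned long data)
 *	{
 *		...
 *		if (TALITOS2_ISR_CH_0_2_DONE & 1)
 *			flush_channel(dev, 0, 0, 0);
 *		if (TALITOS2_ISR_CH_0_2_DONE & (1 << 4))
 *			flush_channel(dev, 2, 0, 0);
 *		... unmask the done interrupts under reg_lock ...
 *	}
 *
 * i.e. one flush per channel covered by the done mask, then re-enabling
 * of the done interrupts that the interrupt handler masked off.
 */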
446
447 /*
448 * locate current (offending) descriptor
449 */
450 static u32 current_desc_hdr(struct device *dev, int ch)
451 {
452 struct talitos_private *priv = dev_get_drvdata(dev);
453 int tail, iter;
454 dma_addr_t cur_desc;
455
456 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
457 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
458
459 if (!cur_desc) {
460 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
461 return 0;
462 }
463
464 tail = priv->chan[ch].tail;
465
466 iter = tail;
467 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
468 iter = (iter + 1) & (priv->fifo_len - 1);
469 if (iter == tail) {
470 dev_err(dev, "couldn't locate current descriptor\n");
471 return 0;
472 }
473 }
474
475 return priv->chan[ch].fifo[iter].desc->hdr;
476 }
477
478 /*
479 * user diagnostics; report root cause of error based on execution unit status
480 */
481 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
482 {
483 struct talitos_private *priv = dev_get_drvdata(dev);
484 int i;
485
486 if (!desc_hdr)
487 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
488
489 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
490 case DESC_HDR_SEL0_AFEU:
491 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
492 in_be32(priv->reg_afeu + TALITOS_EUISR),
493 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
494 break;
495 case DESC_HDR_SEL0_DEU:
496 dev_err(dev, "DEUISR 0x%08x_%08x\n",
497 in_be32(priv->reg_deu + TALITOS_EUISR),
498 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
499 break;
500 case DESC_HDR_SEL0_MDEUA:
501 case DESC_HDR_SEL0_MDEUB:
502 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
503 in_be32(priv->reg_mdeu + TALITOS_EUISR),
504 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
505 break;
506 case DESC_HDR_SEL0_RNG:
507 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
508 in_be32(priv->reg_rngu + TALITOS_ISR),
509 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
510 break;
511 case DESC_HDR_SEL0_PKEU:
512 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
513 in_be32(priv->reg_pkeu + TALITOS_EUISR),
514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
515 break;
516 case DESC_HDR_SEL0_AESU:
517 dev_err(dev, "AESUISR 0x%08x_%08x\n",
518 in_be32(priv->reg_aesu + TALITOS_EUISR),
519 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
520 break;
521 case DESC_HDR_SEL0_CRCU:
522 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
523 in_be32(priv->reg_crcu + TALITOS_EUISR),
524 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
525 break;
526 case DESC_HDR_SEL0_KEU:
527 dev_err(dev, "KEUISR 0x%08x_%08x\n",
528 in_be32(priv->reg_pkeu + TALITOS_EUISR),
529 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
530 break;
531 }
532
533 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
534 case DESC_HDR_SEL1_MDEUA:
535 case DESC_HDR_SEL1_MDEUB:
536 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
537 in_be32(priv->reg_mdeu + TALITOS_EUISR),
538 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
539 break;
540 case DESC_HDR_SEL1_CRCU:
541 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
542 in_be32(priv->reg_crcu + TALITOS_EUISR),
543 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
544 break;
545 }
546
547 for (i = 0; i < 8; i++)
548 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
549 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
550 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
551 }
552
553 /*
554 * recover from error interrupts
555 */
556 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
557 {
558 struct talitos_private *priv = dev_get_drvdata(dev);
559 unsigned int timeout = TALITOS_TIMEOUT;
560 int ch, error, reset_dev = 0;
561 u32 v_lo;
562 bool is_sec1 = has_ftr_sec1(priv);
563 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
564
565 for (ch = 0; ch < priv->num_channels; ch++) {
566 /* skip channels without errors */
567 if (is_sec1) {
568 /* bits 29, 31, 17, 19 */
569 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
570 continue;
571 } else {
572 if (!(isr & (1 << (ch * 2 + 1))))
573 continue;
574 }
575
576 error = -EINVAL;
577
578 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
579
580 if (v_lo & TALITOS_CCPSR_LO_DOF) {
581 dev_err(dev, "double fetch fifo overflow error\n");
582 error = -EAGAIN;
583 reset_ch = 1;
584 }
585 if (v_lo & TALITOS_CCPSR_LO_SOF) {
586 /* h/w dropped descriptor */
587 dev_err(dev, "single fetch fifo overflow error\n");
588 error = -EAGAIN;
589 }
590 if (v_lo & TALITOS_CCPSR_LO_MDTE)
591 dev_err(dev, "master data transfer error\n");
592 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
593 dev_err(dev, is_sec1 ? "pointer not complete error\n"
594 : "s/g data length zero error\n");
595 if (v_lo & TALITOS_CCPSR_LO_FPZ)
596 dev_err(dev, is_sec1 ? "parity error\n"
597 : "fetch pointer zero error\n");
598 if (v_lo & TALITOS_CCPSR_LO_IDH)
599 dev_err(dev, "illegal descriptor header error\n");
600 if (v_lo & TALITOS_CCPSR_LO_IEU)
601 dev_err(dev, is_sec1 ? "static assignment error\n"
602 : "invalid exec unit error\n");
603 if (v_lo & TALITOS_CCPSR_LO_EU)
604 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
605 if (!is_sec1) {
606 if (v_lo & TALITOS_CCPSR_LO_GB)
607 dev_err(dev, "gather boundary error\n");
608 if (v_lo & TALITOS_CCPSR_LO_GRL)
609 dev_err(dev, "gather return/length error\n");
610 if (v_lo & TALITOS_CCPSR_LO_SB)
611 dev_err(dev, "scatter boundary error\n");
612 if (v_lo & TALITOS_CCPSR_LO_SRL)
613 dev_err(dev, "scatter return/length error\n");
614 }
615
616 flush_channel(dev, ch, error, reset_ch);
617
618 if (reset_ch) {
619 reset_channel(dev, ch);
620 } else {
621 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
622 TALITOS2_CCCR_CONT);
623 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
624 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
625 TALITOS2_CCCR_CONT) && --timeout)
626 cpu_relax();
627 if (timeout == 0) {
628 dev_err(dev, "failed to restart channel %d\n",
629 ch);
630 reset_dev = 1;
631 }
632 }
633 }
634 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
635 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
636 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
637 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
638 isr, isr_lo);
639 else
640 dev_err(dev, "done overflow, internal time out, or "
641 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
642
643 /* purge request queues */
644 for (ch = 0; ch < priv->num_channels; ch++)
645 flush_channel(dev, ch, -EIO, 1);
646
647 /* reset and reinitialize the device */
648 init_device(dev);
649 }
650 }
651
652 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
653 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
654 { \
655 struct device *dev = data; \
656 struct talitos_private *priv = dev_get_drvdata(dev); \
657 u32 isr, isr_lo; \
658 unsigned long flags; \
659 \
660 spin_lock_irqsave(&priv->reg_lock, flags); \
661 isr = in_be32(priv->reg + TALITOS_ISR); \
662 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
663 /* Acknowledge interrupt */ \
664 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
665 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
666 \
667 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
668 spin_unlock_irqrestore(&priv->reg_lock, flags); \
669 talitos_error(dev, isr & ch_err_mask, isr_lo); \
670 } \
671 else { \
672 if (likely(isr & ch_done_mask)) { \
673 /* mask further done interrupts. */ \
674 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
675 /* done_task will unmask done interrupts at exit */ \
676 tasklet_schedule(&priv->done_task[tlet]); \
677 } \
678 spin_unlock_irqrestore(&priv->reg_lock, flags); \
679 } \
680 \
681 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
682 IRQ_NONE; \
683 }
684
685 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
686
687 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
688 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
689 { \
690 struct device *dev = data; \
691 struct talitos_private *priv = dev_get_drvdata(dev); \
692 u32 isr, isr_lo; \
693 unsigned long flags; \
694 \
695 spin_lock_irqsave(&priv->reg_lock, flags); \
696 isr = in_be32(priv->reg + TALITOS_ISR); \
697 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
698 /* Acknowledge interrupt */ \
699 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
700 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
701 \
702 if (unlikely(isr & ch_err_mask || isr_lo)) { \
703 spin_unlock_irqrestore(&priv->reg_lock, flags); \
704 talitos_error(dev, isr & ch_err_mask, isr_lo); \
705 } \
706 else { \
707 if (likely(isr & ch_done_mask)) { \
708 /* mask further done interrupts. */ \
709 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
710 /* done_task will unmask done interrupts at exit */ \
711 tasklet_schedule(&priv->done_task[tlet]); \
712 } \
713 spin_unlock_irqrestore(&priv->reg_lock, flags); \
714 } \
715 \
716 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
717 IRQ_NONE; \
718 }
719
720 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
721 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
722 0)
723 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
724 1)
725
726 /*
727 * hwrng
728 */
729 static int talitos_rng_data_present(struct hwrng *rng, int wait)
730 {
731 struct device *dev = (struct device *)rng->priv;
732 struct talitos_private *priv = dev_get_drvdata(dev);
733 u32 ofl;
734 int i;
735
736 for (i = 0; i < 20; i++) {
737 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
738 TALITOS_RNGUSR_LO_OFL;
739 if (ofl || !wait)
740 break;
741 udelay(10);
742 }
743
744 return !!ofl;
745 }
746
747 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
748 {
749 struct device *dev = (struct device *)rng->priv;
750 struct talitos_private *priv = dev_get_drvdata(dev);
751
752 /* rng fifo requires 64-bit accesses */
753 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
754 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
755
756 return sizeof(u32);
757 }
758
759 static int talitos_rng_init(struct hwrng *rng)
760 {
761 struct device *dev = (struct device *)rng->priv;
762 struct talitos_private *priv = dev_get_drvdata(dev);
763 unsigned int timeout = TALITOS_TIMEOUT;
764
765 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
766 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
767 & TALITOS_RNGUSR_LO_RD)
768 && --timeout)
769 cpu_relax();
770 if (timeout == 0) {
771 dev_err(dev, "failed to reset rng hw\n");
772 return -ENODEV;
773 }
774
775 /* start generating */
776 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
777
778 return 0;
779 }
780
781 static int talitos_register_rng(struct device *dev)
782 {
783 struct talitos_private *priv = dev_get_drvdata(dev);
784 int err;
785
786 priv->rng.name = dev_driver_string(dev);
787 priv->rng.init = talitos_rng_init;
788 priv->rng.data_present = talitos_rng_data_present;
789 priv->rng.data_read = talitos_rng_data_read;
790 priv->rng.priv = (unsigned long)dev;
791
792 err = hwrng_register(&priv->rng);
793 if (!err)
794 priv->rng_registered = true;
795
796 return err;
797 }
798
799 static void talitos_unregister_rng(struct device *dev)
800 {
801 struct talitos_private *priv = dev_get_drvdata(dev);
802
803 if (!priv->rng_registered)
804 return;
805
806 hwrng_unregister(&priv->rng);
807 priv->rng_registered = false;
808 }
809
810 /*
811 * crypto alg
812 */
813 #define TALITOS_CRA_PRIORITY 3000
814 /*
815 * Defines a priority for doing AEAD with descriptors type
816 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
817 */
818 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
819 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
820 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
821
822 struct talitos_ctx {
823 struct device *dev;
824 int ch;
825 __be32 desc_hdr_template;
826 u8 key[TALITOS_MAX_KEY_SIZE];
827 u8 iv[TALITOS_MAX_IV_LENGTH];
828 unsigned int keylen;
829 unsigned int enckeylen;
830 unsigned int authkeylen;
831 };
832
833 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
834 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
835
836 struct talitos_ahash_req_ctx {
837 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
838 unsigned int hw_context_size;
839 u8 buf[HASH_MAX_BLOCK_SIZE];
840 u8 bufnext[HASH_MAX_BLOCK_SIZE];
841 unsigned int swinit;
842 unsigned int first;
843 unsigned int last;
844 unsigned int to_hash_later;
845 unsigned int nbuf;
846 struct scatterlist bufsl[2];
847 struct scatterlist *psrc;
848 };
849
850 struct talitos_export_state {
851 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
852 u8 buf[HASH_MAX_BLOCK_SIZE];
853 unsigned int swinit;
854 unsigned int first;
855 unsigned int last;
856 unsigned int to_hash_later;
857 unsigned int nbuf;
858 };
859
860 static int aead_setkey(struct crypto_aead *authenc,
861 const u8 *key, unsigned int keylen)
862 {
863 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
864 struct crypto_authenc_keys keys;
865
866 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
867 goto badkey;
868
869 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
870 goto badkey;
871
872 memcpy(ctx->key, keys.authkey, keys.authkeylen);
873 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
874
875 ctx->keylen = keys.authkeylen + keys.enckeylen;
876 ctx->enckeylen = keys.enckeylen;
877 ctx->authkeylen = keys.authkeylen;
878
879 return 0;
880
881 badkey:
882 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
883 return -EINVAL;
884 }
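
/*
 * After a successful aead_setkey(), ctx->key holds both keys back to
 * back (sizes below are illustrative, e.g. authenc(hmac(sha1),cbc(aes))):
 *
 *	ctx->key:  [ auth key, authkeylen bytes | enc key, enckeylen bytes ]
 *	ctx->keylen == authkeylen + enckeylen
 *
 * The descriptor build code relies on this layout: ptr[0] is mapped from
 * ctx->key with length authkeylen, and the cipher key pointer is mapped
 * from &ctx->key[authkeylen] (see ipsec_esp() below).
 */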
885
886 /*
887 * talitos_edesc - s/w-extended descriptor
888 * @src_nents: number of segments in input scatterlist
889 * @dst_nents: number of segments in output scatterlist
890 * @icv_ool: whether ICV is out-of-line
891 * @iv_dma: dma address of iv for checking continuity and link table
892 * @dma_len: length of dma mapped link_tbl space
893 * @dma_link_tbl: bus physical address of link_tbl/buf
894 * @desc: h/w descriptor
895 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
896 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
897 *
898 * if decrypting (with authcheck), or if either src_nents or dst_nents
899 * is greater than 1, an integrity check value is concatenated to the end
900 * of link_tbl data
901 */
902 struct talitos_edesc {
903 int src_nents;
904 int dst_nents;
905 bool icv_ool;
906 dma_addr_t iv_dma;
907 int dma_len;
908 dma_addr_t dma_link_tbl;
909 struct talitos_desc desc;
910 union {
911 struct talitos_ptr link_tbl[0];
912 u8 buf[0];
913 };
914 };
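
/*
 * Rough layout of one talitos_edesc allocation (an editorial reading of
 * the alloc_len arithmetic in talitos_edesc_alloc() below):
 *
 *	[ struct talitos_edesc                              ]
 *	[ link_tbl[] + two ICV slots (SEC2) or buf[] (SEC1) ]  dma_len, DMA-mapped
 *	[ iv copy, ivsize bytes                             ]  mapped separately (iv_dma)
 */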
915
916 static void talitos_sg_unmap(struct device *dev,
917 struct talitos_edesc *edesc,
918 struct scatterlist *src,
919 struct scatterlist *dst,
920 unsigned int len, unsigned int offset)
921 {
922 struct talitos_private *priv = dev_get_drvdata(dev);
923 bool is_sec1 = has_ftr_sec1(priv);
924 unsigned int src_nents = edesc->src_nents ? : 1;
925 unsigned int dst_nents = edesc->dst_nents ? : 1;
926
927 if (is_sec1 && dst && dst_nents > 1) {
928 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
929 len, DMA_FROM_DEVICE);
930 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
931 offset);
932 }
933 if (src != dst) {
934 if (src_nents == 1 || !is_sec1)
935 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
936
937 if (dst && (dst_nents == 1 || !is_sec1))
938 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
939 } else if (src_nents == 1 || !is_sec1) {
940 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
941 }
942 }
943
944 static void ipsec_esp_unmap(struct device *dev,
945 struct talitos_edesc *edesc,
946 struct aead_request *areq, bool encrypt)
947 {
948 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
949 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
950 unsigned int ivsize = crypto_aead_ivsize(aead);
951 unsigned int authsize = crypto_aead_authsize(aead);
952 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
953
954 if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
955 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
956 DMA_FROM_DEVICE);
957 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
958 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
959 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
960
961 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
962 areq->assoclen);
963
964 if (edesc->dma_len)
965 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
966 DMA_BIDIRECTIONAL);
967
968 if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
969 unsigned int dst_nents = edesc->dst_nents ? : 1;
970
971 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
972 areq->assoclen + cryptlen - ivsize);
973 }
974 }
975
976 /*
977 * ipsec_esp descriptor callbacks
978 */
979 static void ipsec_esp_encrypt_done(struct device *dev,
980 struct talitos_desc *desc, void *context,
981 int err)
982 {
983 struct talitos_private *priv = dev_get_drvdata(dev);
984 bool is_sec1 = has_ftr_sec1(priv);
985 struct aead_request *areq = context;
986 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
987 unsigned int authsize = crypto_aead_authsize(authenc);
988 struct talitos_edesc *edesc;
989 void *icvdata;
990
991 edesc = container_of(desc, struct talitos_edesc, desc);
992
993 ipsec_esp_unmap(dev, edesc, areq, true);
994
995 /* copy the generated ICV to dst */
996 if (edesc->icv_ool) {
997 if (is_sec1)
998 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
999 else
1000 icvdata = &edesc->link_tbl[edesc->src_nents +
1001 edesc->dst_nents + 2];
1002 sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
1003 authsize, areq->assoclen + areq->cryptlen);
1004 }
1005
1006 kfree(edesc);
1007
1008 aead_request_complete(areq, err);
1009 }
1010
1011 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1012 struct talitos_desc *desc,
1013 void *context, int err)
1014 {
1015 struct aead_request *req = context;
1016 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1017 unsigned int authsize = crypto_aead_authsize(authenc);
1018 struct talitos_edesc *edesc;
1019 char *oicv, *icv;
1020 struct talitos_private *priv = dev_get_drvdata(dev);
1021 bool is_sec1 = has_ftr_sec1(priv);
1022
1023 edesc = container_of(desc, struct talitos_edesc, desc);
1024
1025 ipsec_esp_unmap(dev, edesc, req, false);
1026
1027 if (!err) {
1028 char icvdata[SHA512_DIGEST_SIZE];
1029 int nents = edesc->dst_nents ? : 1;
1030 unsigned int len = req->assoclen + req->cryptlen;
1031
1032 /* auth check */
1033 if (nents > 1) {
1034 sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
1035 len - authsize);
1036 icv = icvdata;
1037 } else {
1038 icv = (char *)sg_virt(req->dst) + len - authsize;
1039 }
1040
1041 if (edesc->dma_len) {
1042 if (is_sec1)
1043 oicv = (char *)&edesc->dma_link_tbl +
1044 req->assoclen + req->cryptlen;
1045 else
1046 oicv = (char *)
1047 &edesc->link_tbl[edesc->src_nents +
1048 edesc->dst_nents + 2];
1049 if (edesc->icv_ool)
1050 icv = oicv + authsize;
1051 } else
1052 oicv = (char *)&edesc->link_tbl[0];
1053
1054 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1055 }
1056
1057 kfree(edesc);
1058
1059 aead_request_complete(req, err);
1060 }
1061
1062 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1063 struct talitos_desc *desc,
1064 void *context, int err)
1065 {
1066 struct aead_request *req = context;
1067 struct talitos_edesc *edesc;
1068
1069 edesc = container_of(desc, struct talitos_edesc, desc);
1070
1071 ipsec_esp_unmap(dev, edesc, req, false);
1072
1073 /* check ICV auth status */
1074 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1075 DESC_HDR_LO_ICCR1_PASS))
1076 err = -EBADMSG;
1077
1078 kfree(edesc);
1079
1080 aead_request_complete(req, err);
1081 }
1082
1083 /*
1084 * convert scatterlist to SEC h/w link table format
1085 * stop at cryptlen bytes
1086 */
1087 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1088 unsigned int offset, int cryptlen,
1089 struct talitos_ptr *link_tbl_ptr)
1090 {
1091 int n_sg = sg_count;
1092 int count = 0;
1093
1094 while (cryptlen && sg && n_sg--) {
1095 unsigned int len = sg_dma_len(sg);
1096
1097 if (offset >= len) {
1098 offset -= len;
1099 goto next;
1100 }
1101
1102 len -= offset;
1103
1104 if (len > cryptlen)
1105 len = cryptlen;
1106
1107 to_talitos_ptr(link_tbl_ptr + count,
1108 sg_dma_address(sg) + offset, 0);
1109 to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1110 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1111 count++;
1112 cryptlen -= len;
1113 offset = 0;
1114
1115 next:
1116 sg = sg_next(sg);
1117 }
1118
1119 /* tag end of link table */
1120 if (count > 0)
1121 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1122 DESC_PTR_LNKTBL_RETURN, 0);
1123
1124 return count;
1125 }
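
/*
 * Worked example with hypothetical sizes: a 3-segment scatterlist of
 * 64 + 64 + 32 bytes, offset 16 and cryptlen 100 yields
 *
 *	entry 0: address sg0 + 16, len 48
 *	entry 1: address sg1,      len 52  (capped at the remaining cryptlen)
 *
 * with DESC_PTR_LNKTBL_RETURN tagged on entry 1; the third segment is
 * never referenced because cryptlen is exhausted first.
 */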
1126
1127 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1128 unsigned int len, struct talitos_edesc *edesc,
1129 struct talitos_ptr *ptr, int sg_count,
1130 unsigned int offset, int tbl_off, int elen)
1131 {
1132 struct talitos_private *priv = dev_get_drvdata(dev);
1133 bool is_sec1 = has_ftr_sec1(priv);
1134
1135 if (!src) {
1136 *ptr = zero_entry;
1137 return 1;
1138 }
1139
1140 to_talitos_ptr_len(ptr, len, is_sec1);
1141 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1142
1143 if (sg_count == 1) {
1144 to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
1145 return sg_count;
1146 }
1147 if (is_sec1) {
1148 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
1149 return sg_count;
1150 }
1151 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1152 &edesc->link_tbl[tbl_off]);
1153 if (sg_count == 1) {
1154 /* Only one segment now, so no link tbl needed */
1155 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1156 return sg_count;
1157 }
1158 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1159 tbl_off * sizeof(struct talitos_ptr), is_sec1);
1160 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1161
1162 return sg_count;
1163 }
1164
1165 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1166 unsigned int len, struct talitos_edesc *edesc,
1167 struct talitos_ptr *ptr, int sg_count,
1168 unsigned int offset, int tbl_off)
1169 {
1170 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1171 tbl_off, 0);
1172 }
1173
1174 /*
1175 * fill in and submit ipsec_esp descriptor
1176 */
1177 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1178 bool encrypt,
1179 void (*callback)(struct device *dev,
1180 struct talitos_desc *desc,
1181 void *context, int error))
1182 {
1183 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1184 unsigned int authsize = crypto_aead_authsize(aead);
1185 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1186 struct device *dev = ctx->dev;
1187 struct talitos_desc *desc = &edesc->desc;
1188 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1189 unsigned int ivsize = crypto_aead_ivsize(aead);
1190 int tbl_off = 0;
1191 int sg_count, ret;
1192 int elen = 0;
1193 bool sync_needed = false;
1194 struct talitos_private *priv = dev_get_drvdata(dev);
1195 bool is_sec1 = has_ftr_sec1(priv);
1196
1197 /* hmac key */
1198 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1199 DMA_TO_DEVICE);
1200
1201 sg_count = edesc->src_nents ?: 1;
1202 if (is_sec1 && sg_count > 1)
1203 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1204 areq->assoclen + cryptlen);
1205 else
1206 sg_count = dma_map_sg(dev, areq->src, sg_count,
1207 (areq->src == areq->dst) ?
1208 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1209
1210 /* hmac data */
1211 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1212 &desc->ptr[1], sg_count, 0, tbl_off);
1213
1214 if (ret > 1) {
1215 tbl_off += ret;
1216 sync_needed = true;
1217 }
1218
1219 /* cipher iv */
1220 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1221 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
1222 to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
1223 to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
1224 } else {
1225 to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
1226 to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
1227 to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
1228 }
1229
1230 /* cipher key */
1231 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1232 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1233 (char *)&ctx->key + ctx->authkeylen,
1234 DMA_TO_DEVICE);
1235 else
1236 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
1237 (char *)&ctx->key + ctx->authkeylen,
1238 DMA_TO_DEVICE);
1239
1240 /*
1241 * cipher in
1242 * map and adjust cipher len to aead request cryptlen.
1243 * extent is bytes of HMAC appended to the ciphertext,
1244 * typically 12 for ipsec
1245 */
1246 if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1247 (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1248 elen = authsize;
1249
1250 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1251 sg_count, areq->assoclen, tbl_off, elen);
1252
1253 if (ret > 1) {
1254 tbl_off += ret;
1255 sync_needed = true;
1256 }
1257
1258 /* cipher out */
1259 if (areq->src != areq->dst) {
1260 sg_count = edesc->dst_nents ? : 1;
1261 if (!is_sec1 || sg_count == 1)
1262 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1263 }
1264
1265 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1266 sg_count, areq->assoclen, tbl_off);
1267
1268 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1269 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1270
1271 /* ICV data */
1272 if (ret > 1) {
1273 tbl_off += ret;
1274 edesc->icv_ool = true;
1275 sync_needed = true;
1276
1277 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1278 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1279 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1280 sizeof(struct talitos_ptr) + authsize;
1281
1282 /* Add an entry to the link table for ICV data */
1283 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1284 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1285 is_sec1);
1286 to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1287
1288 /* icv data follows link tables */
1289 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1290 is_sec1);
1291 } else {
1292 dma_addr_t addr = edesc->dma_link_tbl;
1293
1294 if (is_sec1)
1295 addr += areq->assoclen + cryptlen;
1296 else
1297 addr += sizeof(struct talitos_ptr) * tbl_off;
1298
1299 to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
1300 to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1301 }
1302 } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1303 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1304 &desc->ptr[6], sg_count, areq->assoclen +
1305 cryptlen,
1306 tbl_off);
1307 if (ret > 1) {
1308 tbl_off += ret;
1309 edesc->icv_ool = true;
1310 sync_needed = true;
1311 } else {
1312 edesc->icv_ool = false;
1313 }
1314 } else {
1315 edesc->icv_ool = false;
1316 }
1317
1318 /* iv out */
1319 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1320 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1321 DMA_FROM_DEVICE);
1322
1323 if (sync_needed)
1324 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1325 edesc->dma_len,
1326 DMA_BIDIRECTIONAL);
1327
1328 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1329 if (ret != -EINPROGRESS) {
1330 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1331 kfree(edesc);
1332 }
1333 return ret;
1334 }
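
/*
 * Summary of the pointer dwords ipsec_esp() fills in for the IPSEC_ESP
 * descriptor type (the HMAC_SNOOP_NO_AFEU type swaps ptr[2]/ptr[3] and
 * uses ptr[6] for the ICV rather than the iv-out):
 *
 *	ptr[0] hmac key        ptr[4] cipher in (+ ICV extent on hw-auth decrypt)
 *	ptr[1] hmac data (AD)  ptr[5] cipher out
 *	ptr[2] cipher iv       ptr[6] iv out / ICV
 *	ptr[3] cipher key
 */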
1335
1336 /*
1337 * allocate and map the extended descriptor
1338 */
1339 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1340 struct scatterlist *src,
1341 struct scatterlist *dst,
1342 u8 *iv,
1343 unsigned int assoclen,
1344 unsigned int cryptlen,
1345 unsigned int authsize,
1346 unsigned int ivsize,
1347 int icv_stashing,
1348 u32 cryptoflags,
1349 bool encrypt)
1350 {
1351 struct talitos_edesc *edesc;
1352 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1353 dma_addr_t iv_dma = 0;
1354 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1355 GFP_ATOMIC;
1356 struct talitos_private *priv = dev_get_drvdata(dev);
1357 bool is_sec1 = has_ftr_sec1(priv);
1358 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1359
1360 if (cryptlen + authsize > max_len) {
1361 dev_err(dev, "length exceeds h/w max limit\n");
1362 return ERR_PTR(-EINVAL);
1363 }
1364
1365 if (!dst || dst == src) {
1366 src_len = assoclen + cryptlen + authsize;
1367 src_nents = sg_nents_for_len(src, src_len);
1368 if (src_nents < 0) {
1369 dev_err(dev, "Invalid number of src SG.\n");
1370 return ERR_PTR(-EINVAL);
1371 }
1372 src_nents = (src_nents == 1) ? 0 : src_nents;
1373 dst_nents = dst ? src_nents : 0;
1374 dst_len = 0;
1375 } else { /* dst && dst != src*/
1376 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1377 src_nents = sg_nents_for_len(src, src_len);
1378 if (src_nents < 0) {
1379 dev_err(dev, "Invalid number of src SG.\n");
1380 return ERR_PTR(-EINVAL);
1381 }
1382 src_nents = (src_nents == 1) ? 0 : src_nents;
1383 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1384 dst_nents = sg_nents_for_len(dst, dst_len);
1385 if (dst_nents < 0) {
1386 dev_err(dev, "Invalid number of dst SG.\n");
1387 return ERR_PTR(-EINVAL);
1388 }
1389 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1390 }
1391
1392 /*
1393 * allocate space for base edesc plus the link tables,
1394 * allowing for two separate entries for AD and generated ICV (+ 2),
1395 * and space for two sets of ICVs (stashed and generated)
1396 */
1397 alloc_len = sizeof(struct talitos_edesc);
1398 if (src_nents || dst_nents) {
1399 if (is_sec1)
1400 dma_len = (src_nents ? src_len : 0) +
1401 (dst_nents ? dst_len : 0);
1402 else
1403 dma_len = (src_nents + dst_nents + 2) *
1404 sizeof(struct talitos_ptr) + authsize * 2;
1405 alloc_len += dma_len;
1406 } else {
1407 dma_len = 0;
1408 alloc_len += icv_stashing ? authsize : 0;
1409 }
1410 alloc_len += ivsize;
1411
1412 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1413 if (!edesc)
1414 return ERR_PTR(-ENOMEM);
1415 if (ivsize) {
1416 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1417 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1418 }
1419
1420 edesc->src_nents = src_nents;
1421 edesc->dst_nents = dst_nents;
1422 edesc->iv_dma = iv_dma;
1423 edesc->dma_len = dma_len;
1424 if (dma_len)
1425 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1426 edesc->dma_len,
1427 DMA_BIDIRECTIONAL);
1428
1429 return edesc;
1430 }
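
/*
 * Worked sizing example (SEC2, illustrative numbers): src_nents = 3,
 * dst_nents = 2 and authsize = 20 give
 *
 *	dma_len   = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 2 * 20
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len + ivsize
 *
 * whereas on SEC1 the same request sizes dma_len as src_len + dst_len,
 * since data is bounced through edesc->buf instead of link tables.
 */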
1431
1432 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1433 int icv_stashing, bool encrypt)
1434 {
1435 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1436 unsigned int authsize = crypto_aead_authsize(authenc);
1437 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1438 unsigned int ivsize = crypto_aead_ivsize(authenc);
1439 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1440
1441 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1442 iv, areq->assoclen, cryptlen,
1443 authsize, ivsize, icv_stashing,
1444 areq->base.flags, encrypt);
1445 }
1446
1447 static int aead_encrypt(struct aead_request *req)
1448 {
1449 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1450 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1451 struct talitos_edesc *edesc;
1452
1453 /* allocate extended descriptor */
1454 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1455 if (IS_ERR(edesc))
1456 return PTR_ERR(edesc);
1457
1458 /* set encrypt */
1459 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1460
1461 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1462 }
1463
1464 static int aead_decrypt(struct aead_request *req)
1465 {
1466 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1467 unsigned int authsize = crypto_aead_authsize(authenc);
1468 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1469 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1470 struct talitos_edesc *edesc;
1471 void *icvdata;
1472
1473 /* allocate extended descriptor */
1474 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1475 if (IS_ERR(edesc))
1476 return PTR_ERR(edesc);
1477
1478 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1479 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1480 ((!edesc->src_nents && !edesc->dst_nents) ||
1481 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1482
1483 /* decrypt and check the ICV */
1484 edesc->desc.hdr = ctx->desc_hdr_template |
1485 DESC_HDR_DIR_INBOUND |
1486 DESC_HDR_MODE1_MDEU_CICV;
1487
1488 /* reset integrity check result bits */
1489 edesc->desc.hdr_lo = 0;
1490
1491 return ipsec_esp(edesc, req, false,
1492 ipsec_esp_decrypt_hwauth_done);
1493 }
1494
1495 /* Have to check the ICV with software */
1496 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1497
1498 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1499 if (edesc->dma_len)
1500 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1501 edesc->dst_nents + 2];
1502 else
1503 icvdata = &edesc->link_tbl[0];
1504
1505 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1506 req->assoclen + req->cryptlen - authsize);
1507
1508 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1509 }
1510
1511 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1512 const u8 *key, unsigned int keylen)
1513 {
1514 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1515 u32 tmp[DES_EXPKEY_WORDS];
1516
1517 if (keylen > TALITOS_MAX_KEY_SIZE) {
1518 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1519 return -EINVAL;
1520 }
1521
1522 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1523 CRYPTO_TFM_REQ_WEAK_KEY) &&
1524 !des_ekey(tmp, key)) {
1525 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1526 return -EINVAL;
1527 }
1528
1529 memcpy(&ctx->key, key, keylen);
1530 ctx->keylen = keylen;
1531
1532 return 0;
1533 }
1534
1535 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1536 const u8 *key, unsigned int keylen)
1537 {
1538 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1539 keylen == AES_KEYSIZE_256)
1540 return ablkcipher_setkey(cipher, key, keylen);
1541
1542 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1543
1544 return -EINVAL;
1545 }
1546
1547 static void common_nonsnoop_unmap(struct device *dev,
1548 struct talitos_edesc *edesc,
1549 struct ablkcipher_request *areq)
1550 {
1551 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1552
1553 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1554 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1555 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1556
1557 if (edesc->dma_len)
1558 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1559 DMA_BIDIRECTIONAL);
1560 }
1561
1562 static void ablkcipher_done(struct device *dev,
1563 struct talitos_desc *desc, void *context,
1564 int err)
1565 {
1566 struct ablkcipher_request *areq = context;
1567 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1568 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1569 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1570 struct talitos_edesc *edesc;
1571
1572 edesc = container_of(desc, struct talitos_edesc, desc);
1573
1574 common_nonsnoop_unmap(dev, edesc, areq);
1575 memcpy(areq->info, ctx->iv, ivsize);
1576
1577 kfree(edesc);
1578
1579 areq->base.complete(&areq->base, err);
1580 }
1581
1582 static int common_nonsnoop(struct talitos_edesc *edesc,
1583 struct ablkcipher_request *areq,
1584 void (*callback) (struct device *dev,
1585 struct talitos_desc *desc,
1586 void *context, int error))
1587 {
1588 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1589 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1590 struct device *dev = ctx->dev;
1591 struct talitos_desc *desc = &edesc->desc;
1592 unsigned int cryptlen = areq->nbytes;
1593 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1594 int sg_count, ret;
1595 bool sync_needed = false;
1596 struct talitos_private *priv = dev_get_drvdata(dev);
1597 bool is_sec1 = has_ftr_sec1(priv);
1598
1599 /* first DWORD empty */
1600 desc->ptr[0] = zero_entry;
1601
1602 /* cipher iv */
1603 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1604 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1605 to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
1606
1607 /* cipher key */
1608 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1609 (char *)&ctx->key, DMA_TO_DEVICE);
1610
1611 sg_count = edesc->src_nents ?: 1;
1612 if (is_sec1 && sg_count > 1)
1613 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1614 cryptlen);
1615 else
1616 sg_count = dma_map_sg(dev, areq->src, sg_count,
1617 (areq->src == areq->dst) ?
1618 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1619 /*
1620 * cipher in
1621 */
1622 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1623 &desc->ptr[3], sg_count, 0, 0);
1624 if (sg_count > 1)
1625 sync_needed = true;
1626
1627 /* cipher out */
1628 if (areq->src != areq->dst) {
1629 sg_count = edesc->dst_nents ? : 1;
1630 if (!is_sec1 || sg_count == 1)
1631 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1632 }
1633
1634 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1635 sg_count, 0, (edesc->src_nents + 1));
1636 if (ret > 1)
1637 sync_needed = true;
1638
1639 /* iv out */
1640 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1641 DMA_FROM_DEVICE);
1642
1643 /* last DWORD empty */
1644 desc->ptr[6] = zero_entry;
1645
1646 if (sync_needed)
1647 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1648 edesc->dma_len, DMA_BIDIRECTIONAL);
1649
1650 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1651 if (ret != -EINPROGRESS) {
1652 common_nonsnoop_unmap(dev, edesc, areq);
1653 kfree(edesc);
1654 }
1655 return ret;
1656 }
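
/*
 * Pointer dword usage for the common nonsnoop (ablkcipher) descriptor
 * built above:
 *
 *	ptr[0] empty           ptr[4] cipher out
 *	ptr[1] cipher iv       ptr[5] iv out
 *	ptr[2] cipher key      ptr[6] empty
 *	ptr[3] cipher in
 */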
1657
1658 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1659 areq, bool encrypt)
1660 {
1661 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1662 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1663 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1664
1665 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1666 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1667 areq->base.flags, encrypt);
1668 }
1669
1670 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1671 {
1672 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1673 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1674 struct talitos_edesc *edesc;
1675 unsigned int blocksize =
1676 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1677
1678 if (!areq->nbytes)
1679 return 0;
1680
1681 if (areq->nbytes % blocksize)
1682 return -EINVAL;
1683
1684 /* allocate extended descriptor */
1685 edesc = ablkcipher_edesc_alloc(areq, true);
1686 if (IS_ERR(edesc))
1687 return PTR_ERR(edesc);
1688
1689 /* set encrypt */
1690 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1691
1692 return common_nonsnoop(edesc, areq, ablkcipher_done);
1693 }
1694
1695 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1696 {
1697 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1698 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1699 struct talitos_edesc *edesc;
1700 unsigned int blocksize =
1701 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1702
1703 if (!areq->nbytes)
1704 return 0;
1705
1706 if (areq->nbytes % blocksize)
1707 return -EINVAL;
1708
1709 /* allocate extended descriptor */
1710 edesc = ablkcipher_edesc_alloc(areq, false);
1711 if (IS_ERR(edesc))
1712 return PTR_ERR(edesc);
1713
1714 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1715
1716 return common_nonsnoop(edesc, areq, ablkcipher_done);
1717 }
1718
1719 static void common_nonsnoop_hash_unmap(struct device *dev,
1720 struct talitos_edesc *edesc,
1721 struct ahash_request *areq)
1722 {
1723 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1724 struct talitos_private *priv = dev_get_drvdata(dev);
1725 bool is_sec1 = has_ftr_sec1(priv);
1726
1727 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1728
1729 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1730
1731 /* When using hashctx-in, must unmap it. */
1732 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1733 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1734 DMA_TO_DEVICE);
1735
1736 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1737 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1738 DMA_TO_DEVICE);
1739
1740 if (edesc->dma_len)
1741 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1742 DMA_BIDIRECTIONAL);
1743
1744 }
1745
1746 static void ahash_done(struct device *dev,
1747 struct talitos_desc *desc, void *context,
1748 int err)
1749 {
1750 struct ahash_request *areq = context;
1751 struct talitos_edesc *edesc =
1752 container_of(desc, struct talitos_edesc, desc);
1753 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1754
1755 if (!req_ctx->last && req_ctx->to_hash_later) {
1756 /* Position any partial block for next update/final/finup */
1757 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1758 req_ctx->nbuf = req_ctx->to_hash_later;
1759 }
1760 common_nonsnoop_hash_unmap(dev, edesc, areq);
1761
1762 kfree(edesc);
1763
1764 areq->base.complete(&areq->base, err);
1765 }
1766
1767 /*
1768 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1769 * ourselves and submit a padded block
1770 */
1771 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1772 struct talitos_edesc *edesc,
1773 struct talitos_ptr *ptr)
1774 {
1775 static u8 padded_hash[64] = {
1776 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1777 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1780 };
1781
1782 pr_err_once("Bug in SEC1, padding ourself\n");
1783 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1784 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1785 (char *)padded_hash, DMA_TO_DEVICE);
1786 }
1787
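/*
 * Fill the seven descriptor pointers for a hash operation: ptr[1] is
 * the optional hash context in, ptr[2] the optional HMAC key, ptr[3]
 * the data in, and ptr[5] the digest (or intermediate context) out;
 * ptr[0], ptr[4] and ptr[6] stay empty.
 */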
1788 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1789 struct ahash_request *areq, unsigned int length,
1790 void (*callback) (struct device *dev,
1791 struct talitos_desc *desc,
1792 void *context, int error))
1793 {
1794 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1795 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1796 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1797 struct device *dev = ctx->dev;
1798 struct talitos_desc *desc = &edesc->desc;
1799 int ret;
1800 bool sync_needed = false;
1801 struct talitos_private *priv = dev_get_drvdata(dev);
1802 bool is_sec1 = has_ftr_sec1(priv);
1803 int sg_count;
1804
1805 /* first DWORD empty */
1806 desc->ptr[0] = zero_entry;
1807
1808 /* hash context in */
1809 if (!req_ctx->first || req_ctx->swinit) {
1810 map_single_talitos_ptr(dev, &desc->ptr[1],
1811 req_ctx->hw_context_size,
1812 (char *)req_ctx->hw_context,
1813 DMA_TO_DEVICE);
1814 req_ctx->swinit = 0;
1815 } else {
1816 desc->ptr[1] = zero_entry;
1817 }
1818 /* Indicate next op is not the first. */
1819 req_ctx->first = 0;
1820
1821 /* HMAC key */
1822 if (ctx->keylen)
1823 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1824 (char *)&ctx->key, DMA_TO_DEVICE);
1825 else
1826 desc->ptr[2] = zero_entry;
1827
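/* as in common_nonsnoop(): linearize for SEC1, else DMA-map the list */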
1828 sg_count = edesc->src_nents ?: 1;
1829 if (is_sec1 && sg_count > 1)
1830 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1831 else
1832 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1833 DMA_TO_DEVICE);
1834 /*
1835 * data in
1836 */
1837 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1838 &desc->ptr[3], sg_count, 0, 0);
1839 if (sg_count > 1)
1840 sync_needed = true;
1841
1842 /* fifth DWORD empty */
1843 desc->ptr[4] = zero_entry;
1844
1845 /* hash/HMAC out -or- hash context out */
1846 if (req_ctx->last)
1847 map_single_talitos_ptr(dev, &desc->ptr[5],
1848 crypto_ahash_digestsize(tfm),
1849 areq->result, DMA_FROM_DEVICE);
1850 else
1851 map_single_talitos_ptr(dev, &desc->ptr[5],
1852 req_ctx->hw_context_size,
1853 req_ctx->hw_context, DMA_FROM_DEVICE);
1854
1855 /* last DWORD empty */
1856 desc->ptr[6] = zero_entry;
1857
1858 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1859 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1860
1861 if (sync_needed)
1862 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1863 edesc->dma_len, DMA_BIDIRECTIONAL);
1864
1865 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1866 if (ret != -EINPROGRESS) {
1867 common_nonsnoop_hash_unmap(dev, edesc, areq);
1868 kfree(edesc);
1869 }
1870 return ret;
1871 }
1872
1873 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1874 unsigned int nbytes)
1875 {
1876 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1877 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1878 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1879
1880 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1881 nbytes, 0, 0, 0, areq->base.flags, false);
1882 }
1883
1884 static int ahash_init(struct ahash_request *areq)
1885 {
1886 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1887 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1888
1889 /* Initialize the context */
1890 req_ctx->nbuf = 0;
1891 req_ctx->first = 1; /* first indicates h/w must init its context */
1892 req_ctx->swinit = 0; /* assume h/w init of context */
1893 req_ctx->hw_context_size =
1894 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1895 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1896 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1897
1898 return 0;
1899 }
1900
1901 /*
1902 * on h/w without explicit sha224 support, we initialize h/w context
1903 * manually with sha224 constants, and tell it to run sha256.
1904 */
1905 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1906 {
1907 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1908
1909 ahash_init(areq);
1910 req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1911
1912 req_ctx->hw_context[0] = SHA224_H0;
1913 req_ctx->hw_context[1] = SHA224_H1;
1914 req_ctx->hw_context[2] = SHA224_H2;
1915 req_ctx->hw_context[3] = SHA224_H3;
1916 req_ctx->hw_context[4] = SHA224_H4;
1917 req_ctx->hw_context[5] = SHA224_H5;
1918 req_ctx->hw_context[6] = SHA224_H6;
1919 req_ctx->hw_context[7] = SHA224_H7;
1920
1921 /* init 64-bit count */
1922 req_ctx->hw_context[8] = 0;
1923 req_ctx->hw_context[9] = 0;
1924
1925 return 0;
1926 }
1927
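/*
 * Core hash request handler: buffer sub-blocksize tails between calls,
 * chain any previously buffered bytes ahead of the new data, and on
 * non-final updates hold back a partial (or one full) block so the
 * hardware only ever hashes whole blocks mid-stream.
 */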
1928 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1929 {
1930 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1931 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1932 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1933 struct talitos_edesc *edesc;
1934 unsigned int blocksize =
1935 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1936 unsigned int nbytes_to_hash;
1937 unsigned int to_hash_later;
1938 unsigned int nsg;
1939 int nents;
1940
1941 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1942 /* Buffer up to one whole block */
1943 nents = sg_nents_for_len(areq->src, nbytes);
1944 if (nents < 0) {
1945 dev_err(ctx->dev, "Invalid number of src SG.\n");
1946 return nents;
1947 }
1948 sg_copy_to_buffer(areq->src, nents,
1949 req_ctx->buf + req_ctx->nbuf, nbytes);
1950 req_ctx->nbuf += nbytes;
1951 return 0;
1952 }
1953
1954 /* At least (blocksize + 1) bytes are available to hash */
1955 nbytes_to_hash = nbytes + req_ctx->nbuf;
1956 to_hash_later = nbytes_to_hash & (blocksize - 1);
1957
1958 if (req_ctx->last)
1959 to_hash_later = 0;
1960 else if (to_hash_later)
1961 /* There is a partial block. Hash the full block(s) now */
1962 nbytes_to_hash -= to_hash_later;
1963 else {
1964 /* Keep one block buffered */
1965 nbytes_to_hash -= blocksize;
1966 to_hash_later = blocksize;
1967 }
1968
1969 /* Chain in any previously buffered data */
1970 if (req_ctx->nbuf) {
1971 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1972 sg_init_table(req_ctx->bufsl, nsg);
1973 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1974 if (nsg > 1)
1975 sg_chain(req_ctx->bufsl, 2, areq->src);
1976 req_ctx->psrc = req_ctx->bufsl;
1977 } else
1978 req_ctx->psrc = areq->src;
1979
1980 if (to_hash_later) {
1981 nents = sg_nents_for_len(areq->src, nbytes);
1982 if (nents < 0) {
1983 dev_err(ctx->dev, "Invalid number of src SG.\n");
1984 return nents;
1985 }
1986 sg_pcopy_to_buffer(areq->src, nents,
1987 req_ctx->bufnext,
1988 to_hash_later,
1989 nbytes - to_hash_later);
1990 }
1991 req_ctx->to_hash_later = to_hash_later;
1992
1993 /* Allocate extended descriptor */
1994 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1995 if (IS_ERR(edesc))
1996 return PTR_ERR(edesc);
1997
1998 edesc->desc.hdr = ctx->desc_hdr_template;
1999
2000 /* On last one, request SEC to pad; otherwise continue */
2001 if (req_ctx->last)
2002 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2003 else
2004 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2005
2006 /* request SEC to INIT hash. */
2007 if (req_ctx->first && !req_ctx->swinit)
2008 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2009
2010 /* When the tfm context has a keylen, it's an HMAC.
2011 * A first or last (i.e. not middle) descriptor must request HMAC.
2012 */
2013 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2014 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2015
2016 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
2017 ahash_done);
2018 }
2019
2020 static int ahash_update(struct ahash_request *areq)
2021 {
2022 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2023
2024 req_ctx->last = 0;
2025
2026 return ahash_process_req(areq, areq->nbytes);
2027 }
2028
2029 static int ahash_final(struct ahash_request *areq)
2030 {
2031 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2032
2033 req_ctx->last = 1;
2034
2035 return ahash_process_req(areq, 0);
2036 }
2037
2038 static int ahash_finup(struct ahash_request *areq)
2039 {
2040 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2041
2042 req_ctx->last = 1;
2043
2044 return ahash_process_req(areq, areq->nbytes);
2045 }
2046
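/* digest = init + one final pass over the whole request */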
2047 static int ahash_digest(struct ahash_request *areq)
2048 {
2049 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2050 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2051
2052 ahash->init(areq);
2053 req_ctx->last = 1;
2054
2055 return ahash_process_req(areq, areq->nbytes);
2056 }
2057
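/*
 * export/import serialize the partial-hash state (hardware context
 * plus buffered bytes) so it can be restored into another request.
 */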
2058 static int ahash_export(struct ahash_request *areq, void *out)
2059 {
2060 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2061 struct talitos_export_state *export = out;
2062
2063 memcpy(export->hw_context, req_ctx->hw_context,
2064 req_ctx->hw_context_size);
2065 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2066 export->swinit = req_ctx->swinit;
2067 export->first = req_ctx->first;
2068 export->last = req_ctx->last;
2069 export->to_hash_later = req_ctx->to_hash_later;
2070 export->nbuf = req_ctx->nbuf;
2071
2072 return 0;
2073 }
2074
2075 static int ahash_import(struct ahash_request *areq, const void *in)
2076 {
2077 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2078 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2079 const struct talitos_export_state *export = in;
2080
2081 memset(req_ctx, 0, sizeof(*req_ctx));
2082 req_ctx->hw_context_size =
2083 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2084 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2085 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2086 memcpy(req_ctx->hw_context, export->hw_context,
2087 req_ctx->hw_context_size);
2088 memcpy(req_ctx->buf, export->buf, export->nbuf);
2089 req_ctx->swinit = export->swinit;
2090 req_ctx->first = export->first;
2091 req_ctx->last = export->last;
2092 req_ctx->to_hash_later = export->to_hash_later;
2093 req_ctx->nbuf = export->nbuf;
2094
2095 return 0;
2096 }
2097
2098 struct keyhash_result {
2099 struct completion completion;
2100 int err;
2101 };
2102
2103 static void keyhash_complete(struct crypto_async_request *req, int err)
2104 {
2105 struct keyhash_result *res = req->data;
2106
2107 if (err == -EINPROGRESS)
2108 return;
2109
2110 res->err = err;
2111 complete(&res->completion);
2112 }
2113
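/*
 * Synchronously digest a key that exceeds the block size; per the HMAC
 * construction, the digest then replaces the key (see ahash_setkey()).
 */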
2114 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2115 u8 *hash)
2116 {
2117 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2118
2119 struct scatterlist sg[1];
2120 struct ahash_request *req;
2121 struct keyhash_result hresult;
2122 int ret;
2123
2124 init_completion(&hresult.completion);
2125
2126 req = ahash_request_alloc(tfm, GFP_KERNEL);
2127 if (!req)
2128 return -ENOMEM;
2129
2130 /* Keep tfm keylen == 0 during hash of the long key */
2131 ctx->keylen = 0;
2132 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2133 keyhash_complete, &hresult);
2134
2135 sg_init_one(&sg[0], key, keylen);
2136
2137 ahash_request_set_crypt(req, sg, hash, keylen);
2138 ret = crypto_ahash_digest(req);
2139 switch (ret) {
2140 case 0:
2141 break;
2142 case -EINPROGRESS:
2143 case -EBUSY:
2144 ret = wait_for_completion_interruptible(
2145 &hresult.completion);
2146 if (!ret)
2147 ret = hresult.err;
2148 break;
2149 default:
2150 break;
2151 }
2152 ahash_request_free(req);
2153
2154 return ret;
2155 }
2156
2157 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2158 unsigned int keylen)
2159 {
2160 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2161 unsigned int blocksize =
2162 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2163 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2164 unsigned int keysize = keylen;
2165 u8 hash[SHA512_DIGEST_SIZE];
2166 int ret;
2167
2168 if (keylen <= blocksize) {
2169 memcpy(ctx->key, key, keysize);
2170 } else {
2171 /* Must get the hash of the long key */
2172 ret = keyhash(tfm, key, keylen, hash);
2173
2174 if (ret) {
2175 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2176 return -EINVAL;
2177 }
2178
2179 keysize = digestsize;
2180 memcpy(ctx->key, hash, digestsize);
2181 }
2182
2183 ctx->keylen = keysize;
2184
2185 return 0;
2186 }
2187
2188
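/*
 * One registrable algorithm: the union member selected by .type holds
 * the crypto API descriptor, and desc_hdr_template encodes the SEC
 * descriptor type and execution unit selection for hw_supports().
 */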
2189 struct talitos_alg_template {
2190 u32 type;
2191 u32 priority;
2192 union {
2193 struct crypto_alg crypto;
2194 struct ahash_alg hash;
2195 struct aead_alg aead;
2196 } alg;
2197 __be32 desc_hdr_template;
2198 };
2199
2200 static struct talitos_alg_template driver_algs[] = {
2201 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2202 { .type = CRYPTO_ALG_TYPE_AEAD,
2203 .alg.aead = {
2204 .base = {
2205 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2206 .cra_driver_name = "authenc-hmac-sha1-"
2207 "cbc-aes-talitos",
2208 .cra_blocksize = AES_BLOCK_SIZE,
2209 .cra_flags = CRYPTO_ALG_ASYNC,
2210 },
2211 .ivsize = AES_BLOCK_SIZE,
2212 .maxauthsize = SHA1_DIGEST_SIZE,
2213 },
2214 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2215 DESC_HDR_SEL0_AESU |
2216 DESC_HDR_MODE0_AESU_CBC |
2217 DESC_HDR_SEL1_MDEUA |
2218 DESC_HDR_MODE1_MDEU_INIT |
2219 DESC_HDR_MODE1_MDEU_PAD |
2220 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2221 },
2222 { .type = CRYPTO_ALG_TYPE_AEAD,
2223 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2224 .alg.aead = {
2225 .base = {
2226 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2227 .cra_driver_name = "authenc-hmac-sha1-"
2228 "cbc-aes-talitos-hsna",
2229 .cra_blocksize = AES_BLOCK_SIZE,
2230 .cra_flags = CRYPTO_ALG_ASYNC,
2231 },
2232 .ivsize = AES_BLOCK_SIZE,
2233 .maxauthsize = SHA1_DIGEST_SIZE,
2234 },
2235 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2236 DESC_HDR_SEL0_AESU |
2237 DESC_HDR_MODE0_AESU_CBC |
2238 DESC_HDR_SEL1_MDEUA |
2239 DESC_HDR_MODE1_MDEU_INIT |
2240 DESC_HDR_MODE1_MDEU_PAD |
2241 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2242 },
2243 { .type = CRYPTO_ALG_TYPE_AEAD,
2244 .alg.aead = {
2245 .base = {
2246 .cra_name = "authenc(hmac(sha1),"
2247 "cbc(des3_ede))",
2248 .cra_driver_name = "authenc-hmac-sha1-"
2249 "cbc-3des-talitos",
2250 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2251 .cra_flags = CRYPTO_ALG_ASYNC,
2252 },
2253 .ivsize = DES3_EDE_BLOCK_SIZE,
2254 .maxauthsize = SHA1_DIGEST_SIZE,
2255 },
2256 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2257 DESC_HDR_SEL0_DEU |
2258 DESC_HDR_MODE0_DEU_CBC |
2259 DESC_HDR_MODE0_DEU_3DES |
2260 DESC_HDR_SEL1_MDEUA |
2261 DESC_HDR_MODE1_MDEU_INIT |
2262 DESC_HDR_MODE1_MDEU_PAD |
2263 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2264 },
2265 { .type = CRYPTO_ALG_TYPE_AEAD,
2266 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2267 .alg.aead = {
2268 .base = {
2269 .cra_name = "authenc(hmac(sha1),"
2270 "cbc(des3_ede))",
2271 .cra_driver_name = "authenc-hmac-sha1-"
2272 "cbc-3des-talitos-hsna",
2273 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2274 .cra_flags = CRYPTO_ALG_ASYNC,
2275 },
2276 .ivsize = DES3_EDE_BLOCK_SIZE,
2277 .maxauthsize = SHA1_DIGEST_SIZE,
2278 },
2279 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2280 DESC_HDR_SEL0_DEU |
2281 DESC_HDR_MODE0_DEU_CBC |
2282 DESC_HDR_MODE0_DEU_3DES |
2283 DESC_HDR_SEL1_MDEUA |
2284 DESC_HDR_MODE1_MDEU_INIT |
2285 DESC_HDR_MODE1_MDEU_PAD |
2286 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2287 },
2288 { .type = CRYPTO_ALG_TYPE_AEAD,
2289 .alg.aead = {
2290 .base = {
2291 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2292 .cra_driver_name = "authenc-hmac-sha224-"
2293 "cbc-aes-talitos",
2294 .cra_blocksize = AES_BLOCK_SIZE,
2295 .cra_flags = CRYPTO_ALG_ASYNC,
2296 },
2297 .ivsize = AES_BLOCK_SIZE,
2298 .maxauthsize = SHA224_DIGEST_SIZE,
2299 },
2300 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2301 DESC_HDR_SEL0_AESU |
2302 DESC_HDR_MODE0_AESU_CBC |
2303 DESC_HDR_SEL1_MDEUA |
2304 DESC_HDR_MODE1_MDEU_INIT |
2305 DESC_HDR_MODE1_MDEU_PAD |
2306 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2307 },
2308 { .type = CRYPTO_ALG_TYPE_AEAD,
2309 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2310 .alg.aead = {
2311 .base = {
2312 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2313 .cra_driver_name = "authenc-hmac-sha224-"
2314 "cbc-aes-talitos-hsna",
2315 .cra_blocksize = AES_BLOCK_SIZE,
2316 .cra_flags = CRYPTO_ALG_ASYNC,
2317 },
2318 .ivsize = AES_BLOCK_SIZE,
2319 .maxauthsize = SHA224_DIGEST_SIZE,
2320 },
2321 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2322 DESC_HDR_SEL0_AESU |
2323 DESC_HDR_MODE0_AESU_CBC |
2324 DESC_HDR_SEL1_MDEUA |
2325 DESC_HDR_MODE1_MDEU_INIT |
2326 DESC_HDR_MODE1_MDEU_PAD |
2327 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2328 },
2329 { .type = CRYPTO_ALG_TYPE_AEAD,
2330 .alg.aead = {
2331 .base = {
2332 .cra_name = "authenc(hmac(sha224),"
2333 "cbc(des3_ede))",
2334 .cra_driver_name = "authenc-hmac-sha224-"
2335 "cbc-3des-talitos",
2336 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2337 .cra_flags = CRYPTO_ALG_ASYNC,
2338 },
2339 .ivsize = DES3_EDE_BLOCK_SIZE,
2340 .maxauthsize = SHA224_DIGEST_SIZE,
2341 },
2342 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2343 DESC_HDR_SEL0_DEU |
2344 DESC_HDR_MODE0_DEU_CBC |
2345 DESC_HDR_MODE0_DEU_3DES |
2346 DESC_HDR_SEL1_MDEUA |
2347 DESC_HDR_MODE1_MDEU_INIT |
2348 DESC_HDR_MODE1_MDEU_PAD |
2349 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2350 },
2351 { .type = CRYPTO_ALG_TYPE_AEAD,
2352 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2353 .alg.aead = {
2354 .base = {
2355 .cra_name = "authenc(hmac(sha224),"
2356 "cbc(des3_ede))",
2357 .cra_driver_name = "authenc-hmac-sha224-"
2358 "cbc-3des-talitos-hsna",
2359 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2360 .cra_flags = CRYPTO_ALG_ASYNC,
2361 },
2362 .ivsize = DES3_EDE_BLOCK_SIZE,
2363 .maxauthsize = SHA224_DIGEST_SIZE,
2364 },
2365 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2366 DESC_HDR_SEL0_DEU |
2367 DESC_HDR_MODE0_DEU_CBC |
2368 DESC_HDR_MODE0_DEU_3DES |
2369 DESC_HDR_SEL1_MDEUA |
2370 DESC_HDR_MODE1_MDEU_INIT |
2371 DESC_HDR_MODE1_MDEU_PAD |
2372 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2373 },
2374 { .type = CRYPTO_ALG_TYPE_AEAD,
2375 .alg.aead = {
2376 .base = {
2377 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2378 .cra_driver_name = "authenc-hmac-sha256-"
2379 "cbc-aes-talitos",
2380 .cra_blocksize = AES_BLOCK_SIZE,
2381 .cra_flags = CRYPTO_ALG_ASYNC,
2382 },
2383 .ivsize = AES_BLOCK_SIZE,
2384 .maxauthsize = SHA256_DIGEST_SIZE,
2385 },
2386 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2387 DESC_HDR_SEL0_AESU |
2388 DESC_HDR_MODE0_AESU_CBC |
2389 DESC_HDR_SEL1_MDEUA |
2390 DESC_HDR_MODE1_MDEU_INIT |
2391 DESC_HDR_MODE1_MDEU_PAD |
2392 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2393 },
2394 { .type = CRYPTO_ALG_TYPE_AEAD,
2395 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2396 .alg.aead = {
2397 .base = {
2398 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2399 .cra_driver_name = "authenc-hmac-sha256-"
2400 "cbc-aes-talitos-hsna",
2401 .cra_blocksize = AES_BLOCK_SIZE,
2402 .cra_flags = CRYPTO_ALG_ASYNC,
2403 },
2404 .ivsize = AES_BLOCK_SIZE,
2405 .maxauthsize = SHA256_DIGEST_SIZE,
2406 },
2407 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2408 DESC_HDR_SEL0_AESU |
2409 DESC_HDR_MODE0_AESU_CBC |
2410 DESC_HDR_SEL1_MDEUA |
2411 DESC_HDR_MODE1_MDEU_INIT |
2412 DESC_HDR_MODE1_MDEU_PAD |
2413 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2414 },
2415 { .type = CRYPTO_ALG_TYPE_AEAD,
2416 .alg.aead = {
2417 .base = {
2418 .cra_name = "authenc(hmac(sha256),"
2419 "cbc(des3_ede))",
2420 .cra_driver_name = "authenc-hmac-sha256-"
2421 "cbc-3des-talitos",
2422 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2423 .cra_flags = CRYPTO_ALG_ASYNC,
2424 },
2425 .ivsize = DES3_EDE_BLOCK_SIZE,
2426 .maxauthsize = SHA256_DIGEST_SIZE,
2427 },
2428 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2429 DESC_HDR_SEL0_DEU |
2430 DESC_HDR_MODE0_DEU_CBC |
2431 DESC_HDR_MODE0_DEU_3DES |
2432 DESC_HDR_SEL1_MDEUA |
2433 DESC_HDR_MODE1_MDEU_INIT |
2434 DESC_HDR_MODE1_MDEU_PAD |
2435 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2436 },
2437 { .type = CRYPTO_ALG_TYPE_AEAD,
2438 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2439 .alg.aead = {
2440 .base = {
2441 .cra_name = "authenc(hmac(sha256),"
2442 "cbc(des3_ede))",
2443 .cra_driver_name = "authenc-hmac-sha256-"
2444 "cbc-3des-talitos-hsna",
2445 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2446 .cra_flags = CRYPTO_ALG_ASYNC,
2447 },
2448 .ivsize = DES3_EDE_BLOCK_SIZE,
2449 .maxauthsize = SHA256_DIGEST_SIZE,
2450 },
2451 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2452 DESC_HDR_SEL0_DEU |
2453 DESC_HDR_MODE0_DEU_CBC |
2454 DESC_HDR_MODE0_DEU_3DES |
2455 DESC_HDR_SEL1_MDEUA |
2456 DESC_HDR_MODE1_MDEU_INIT |
2457 DESC_HDR_MODE1_MDEU_PAD |
2458 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2459 },
2460 { .type = CRYPTO_ALG_TYPE_AEAD,
2461 .alg.aead = {
2462 .base = {
2463 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2464 .cra_driver_name = "authenc-hmac-sha384-"
2465 "cbc-aes-talitos",
2466 .cra_blocksize = AES_BLOCK_SIZE,
2467 .cra_flags = CRYPTO_ALG_ASYNC,
2468 },
2469 .ivsize = AES_BLOCK_SIZE,
2470 .maxauthsize = SHA384_DIGEST_SIZE,
2471 },
2472 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2473 DESC_HDR_SEL0_AESU |
2474 DESC_HDR_MODE0_AESU_CBC |
2475 DESC_HDR_SEL1_MDEUB |
2476 DESC_HDR_MODE1_MDEU_INIT |
2477 DESC_HDR_MODE1_MDEU_PAD |
2478 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2479 },
2480 { .type = CRYPTO_ALG_TYPE_AEAD,
2481 .alg.aead = {
2482 .base = {
2483 .cra_name = "authenc(hmac(sha384),"
2484 "cbc(des3_ede))",
2485 .cra_driver_name = "authenc-hmac-sha384-"
2486 "cbc-3des-talitos",
2487 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2488 .cra_flags = CRYPTO_ALG_ASYNC,
2489 },
2490 .ivsize = DES3_EDE_BLOCK_SIZE,
2491 .maxauthsize = SHA384_DIGEST_SIZE,
2492 },
2493 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2494 DESC_HDR_SEL0_DEU |
2495 DESC_HDR_MODE0_DEU_CBC |
2496 DESC_HDR_MODE0_DEU_3DES |
2497 DESC_HDR_SEL1_MDEUB |
2498 DESC_HDR_MODE1_MDEU_INIT |
2499 DESC_HDR_MODE1_MDEU_PAD |
2500 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2501 },
2502 { .type = CRYPTO_ALG_TYPE_AEAD,
2503 .alg.aead = {
2504 .base = {
2505 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2506 .cra_driver_name = "authenc-hmac-sha512-"
2507 "cbc-aes-talitos",
2508 .cra_blocksize = AES_BLOCK_SIZE,
2509 .cra_flags = CRYPTO_ALG_ASYNC,
2510 },
2511 .ivsize = AES_BLOCK_SIZE,
2512 .maxauthsize = SHA512_DIGEST_SIZE,
2513 },
2514 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2515 DESC_HDR_SEL0_AESU |
2516 DESC_HDR_MODE0_AESU_CBC |
2517 DESC_HDR_SEL1_MDEUB |
2518 DESC_HDR_MODE1_MDEU_INIT |
2519 DESC_HDR_MODE1_MDEU_PAD |
2520 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2521 },
2522 { .type = CRYPTO_ALG_TYPE_AEAD,
2523 .alg.aead = {
2524 .base = {
2525 .cra_name = "authenc(hmac(sha512),"
2526 "cbc(des3_ede))",
2527 .cra_driver_name = "authenc-hmac-sha512-"
2528 "cbc-3des-talitos",
2529 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2530 .cra_flags = CRYPTO_ALG_ASYNC,
2531 },
2532 .ivsize = DES3_EDE_BLOCK_SIZE,
2533 .maxauthsize = SHA512_DIGEST_SIZE,
2534 },
2535 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2536 DESC_HDR_SEL0_DEU |
2537 DESC_HDR_MODE0_DEU_CBC |
2538 DESC_HDR_MODE0_DEU_3DES |
2539 DESC_HDR_SEL1_MDEUB |
2540 DESC_HDR_MODE1_MDEU_INIT |
2541 DESC_HDR_MODE1_MDEU_PAD |
2542 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2543 },
2544 { .type = CRYPTO_ALG_TYPE_AEAD,
2545 .alg.aead = {
2546 .base = {
2547 .cra_name = "authenc(hmac(md5),cbc(aes))",
2548 .cra_driver_name = "authenc-hmac-md5-"
2549 "cbc-aes-talitos",
2550 .cra_blocksize = AES_BLOCK_SIZE,
2551 .cra_flags = CRYPTO_ALG_ASYNC,
2552 },
2553 .ivsize = AES_BLOCK_SIZE,
2554 .maxauthsize = MD5_DIGEST_SIZE,
2555 },
2556 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2557 DESC_HDR_SEL0_AESU |
2558 DESC_HDR_MODE0_AESU_CBC |
2559 DESC_HDR_SEL1_MDEUA |
2560 DESC_HDR_MODE1_MDEU_INIT |
2561 DESC_HDR_MODE1_MDEU_PAD |
2562 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2563 },
2564 { .type = CRYPTO_ALG_TYPE_AEAD,
2565 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2566 .alg.aead = {
2567 .base = {
2568 .cra_name = "authenc(hmac(md5),cbc(aes))",
2569 .cra_driver_name = "authenc-hmac-md5-"
2570 "cbc-aes-talitos-hsna",
2571 .cra_blocksize = AES_BLOCK_SIZE,
2572 .cra_flags = CRYPTO_ALG_ASYNC,
2573 },
2574 .ivsize = AES_BLOCK_SIZE,
2575 .maxauthsize = MD5_DIGEST_SIZE,
2576 },
2577 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2578 DESC_HDR_SEL0_AESU |
2579 DESC_HDR_MODE0_AESU_CBC |
2580 DESC_HDR_SEL1_MDEUA |
2581 DESC_HDR_MODE1_MDEU_INIT |
2582 DESC_HDR_MODE1_MDEU_PAD |
2583 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2584 },
2585 { .type = CRYPTO_ALG_TYPE_AEAD,
2586 .alg.aead = {
2587 .base = {
2588 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2589 .cra_driver_name = "authenc-hmac-md5-"
2590 "cbc-3des-talitos",
2591 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2592 .cra_flags = CRYPTO_ALG_ASYNC,
2593 },
2594 .ivsize = DES3_EDE_BLOCK_SIZE,
2595 .maxauthsize = MD5_DIGEST_SIZE,
2596 },
2597 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2598 DESC_HDR_SEL0_DEU |
2599 DESC_HDR_MODE0_DEU_CBC |
2600 DESC_HDR_MODE0_DEU_3DES |
2601 DESC_HDR_SEL1_MDEUA |
2602 DESC_HDR_MODE1_MDEU_INIT |
2603 DESC_HDR_MODE1_MDEU_PAD |
2604 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2605 },
2606 { .type = CRYPTO_ALG_TYPE_AEAD,
2607 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2608 .alg.aead = {
2609 .base = {
2610 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2611 .cra_driver_name = "authenc-hmac-md5-"
2612 "cbc-3des-talitos-hsna",
2613 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2614 .cra_flags = CRYPTO_ALG_ASYNC,
2615 },
2616 .ivsize = DES3_EDE_BLOCK_SIZE,
2617 .maxauthsize = MD5_DIGEST_SIZE,
2618 },
2619 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2620 DESC_HDR_SEL0_DEU |
2621 DESC_HDR_MODE0_DEU_CBC |
2622 DESC_HDR_MODE0_DEU_3DES |
2623 DESC_HDR_SEL1_MDEUA |
2624 DESC_HDR_MODE1_MDEU_INIT |
2625 DESC_HDR_MODE1_MDEU_PAD |
2626 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2627 },
2628 /* ABLKCIPHER algorithms. */
2629 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2630 .alg.crypto = {
2631 .cra_name = "ecb(aes)",
2632 .cra_driver_name = "ecb-aes-talitos",
2633 .cra_blocksize = AES_BLOCK_SIZE,
2634 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2635 CRYPTO_ALG_ASYNC,
2636 .cra_ablkcipher = {
2637 .min_keysize = AES_MIN_KEY_SIZE,
2638 .max_keysize = AES_MAX_KEY_SIZE,
2639 .ivsize = AES_BLOCK_SIZE,
2640 }
2641 },
2642 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2643 DESC_HDR_SEL0_AESU,
2644 },
2645 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2646 .alg.crypto = {
2647 .cra_name = "cbc(aes)",
2648 .cra_driver_name = "cbc-aes-talitos",
2649 .cra_blocksize = AES_BLOCK_SIZE,
2650 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2651 CRYPTO_ALG_ASYNC,
2652 .cra_ablkcipher = {
2653 .min_keysize = AES_MIN_KEY_SIZE,
2654 .max_keysize = AES_MAX_KEY_SIZE,
2655 .ivsize = AES_BLOCK_SIZE,
2656 .setkey = ablkcipher_aes_setkey,
2657 }
2658 },
2659 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2660 DESC_HDR_SEL0_AESU |
2661 DESC_HDR_MODE0_AESU_CBC,
2662 },
2663 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2664 .alg.crypto = {
2665 .cra_name = "ctr(aes)",
2666 .cra_driver_name = "ctr-aes-talitos",
2667 .cra_blocksize = 1,
2668 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2669 CRYPTO_ALG_ASYNC,
2670 .cra_ablkcipher = {
2671 .min_keysize = AES_MIN_KEY_SIZE,
2672 .max_keysize = AES_MAX_KEY_SIZE,
2673 .setkey = ablkcipher_aes_setkey,
2674 }
2675 },
2676 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2677 DESC_HDR_SEL0_AESU |
2678 DESC_HDR_MODE0_AESU_CTR,
2679 },
2680 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2681 .alg.crypto = {
2682 .cra_name = "ecb(des)",
2683 .cra_driver_name = "ecb-des-talitos",
2684 .cra_blocksize = DES_BLOCK_SIZE,
2685 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2686 CRYPTO_ALG_ASYNC,
2687 .cra_ablkcipher = {
2688 .min_keysize = DES_KEY_SIZE,
2689 .max_keysize = DES_KEY_SIZE,
2690 .ivsize = DES_BLOCK_SIZE,
2691 }
2692 },
2693 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2694 DESC_HDR_SEL0_DEU,
2695 },
2696 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2697 .alg.crypto = {
2698 .cra_name = "cbc(des)",
2699 .cra_driver_name = "cbc-des-talitos",
2700 .cra_blocksize = DES_BLOCK_SIZE,
2701 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2702 CRYPTO_ALG_ASYNC,
2703 .cra_ablkcipher = {
2704 .min_keysize = DES_KEY_SIZE,
2705 .max_keysize = DES_KEY_SIZE,
2706 .ivsize = DES_BLOCK_SIZE,
2707 }
2708 },
2709 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2710 DESC_HDR_SEL0_DEU |
2711 DESC_HDR_MODE0_DEU_CBC,
2712 },
2713 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2714 .alg.crypto = {
2715 .cra_name = "ecb(des3_ede)",
2716 .cra_driver_name = "ecb-3des-talitos",
2717 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2718 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2719 CRYPTO_ALG_ASYNC,
2720 .cra_ablkcipher = {
2721 .min_keysize = DES3_EDE_KEY_SIZE,
2722 .max_keysize = DES3_EDE_KEY_SIZE,
2723 .ivsize = DES3_EDE_BLOCK_SIZE,
2724 }
2725 },
2726 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2727 DESC_HDR_SEL0_DEU |
2728 DESC_HDR_MODE0_DEU_3DES,
2729 },
2730 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2731 .alg.crypto = {
2732 .cra_name = "cbc(des3_ede)",
2733 .cra_driver_name = "cbc-3des-talitos",
2734 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2735 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2736 CRYPTO_ALG_ASYNC,
2737 .cra_ablkcipher = {
2738 .min_keysize = DES3_EDE_KEY_SIZE,
2739 .max_keysize = DES3_EDE_KEY_SIZE,
2740 .ivsize = DES3_EDE_BLOCK_SIZE,
2741 }
2742 },
2743 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744 DESC_HDR_SEL0_DEU |
2745 DESC_HDR_MODE0_DEU_CBC |
2746 DESC_HDR_MODE0_DEU_3DES,
2747 },
2748 /* AHASH algorithms. */
2749 { .type = CRYPTO_ALG_TYPE_AHASH,
2750 .alg.hash = {
2751 .halg.digestsize = MD5_DIGEST_SIZE,
2752 .halg.statesize = sizeof(struct talitos_export_state),
2753 .halg.base = {
2754 .cra_name = "md5",
2755 .cra_driver_name = "md5-talitos",
2756 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2757 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2758 CRYPTO_ALG_ASYNC,
2759 }
2760 },
2761 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2762 DESC_HDR_SEL0_MDEUA |
2763 DESC_HDR_MODE0_MDEU_MD5,
2764 },
2765 { .type = CRYPTO_ALG_TYPE_AHASH,
2766 .alg.hash = {
2767 .halg.digestsize = SHA1_DIGEST_SIZE,
2768 .halg.statesize = sizeof(struct talitos_export_state),
2769 .halg.base = {
2770 .cra_name = "sha1",
2771 .cra_driver_name = "sha1-talitos",
2772 .cra_blocksize = SHA1_BLOCK_SIZE,
2773 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2774 CRYPTO_ALG_ASYNC,
2775 }
2776 },
2777 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778 DESC_HDR_SEL0_MDEUA |
2779 DESC_HDR_MODE0_MDEU_SHA1,
2780 },
2781 { .type = CRYPTO_ALG_TYPE_AHASH,
2782 .alg.hash = {
2783 .halg.digestsize = SHA224_DIGEST_SIZE,
2784 .halg.statesize = sizeof(struct talitos_export_state),
2785 .halg.base = {
2786 .cra_name = "sha224",
2787 .cra_driver_name = "sha224-talitos",
2788 .cra_blocksize = SHA224_BLOCK_SIZE,
2789 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2790 CRYPTO_ALG_ASYNC,
2791 }
2792 },
2793 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2794 DESC_HDR_SEL0_MDEUA |
2795 DESC_HDR_MODE0_MDEU_SHA224,
2796 },
2797 { .type = CRYPTO_ALG_TYPE_AHASH,
2798 .alg.hash = {
2799 .halg.digestsize = SHA256_DIGEST_SIZE,
2800 .halg.statesize = sizeof(struct talitos_export_state),
2801 .halg.base = {
2802 .cra_name = "sha256",
2803 .cra_driver_name = "sha256-talitos",
2804 .cra_blocksize = SHA256_BLOCK_SIZE,
2805 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2806 CRYPTO_ALG_ASYNC,
2807 }
2808 },
2809 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 DESC_HDR_SEL0_MDEUA |
2811 DESC_HDR_MODE0_MDEU_SHA256,
2812 },
2813 { .type = CRYPTO_ALG_TYPE_AHASH,
2814 .alg.hash = {
2815 .halg.digestsize = SHA384_DIGEST_SIZE,
2816 .halg.statesize = sizeof(struct talitos_export_state),
2817 .halg.base = {
2818 .cra_name = "sha384",
2819 .cra_driver_name = "sha384-talitos",
2820 .cra_blocksize = SHA384_BLOCK_SIZE,
2821 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2822 CRYPTO_ALG_ASYNC,
2823 }
2824 },
2825 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2826 DESC_HDR_SEL0_MDEUB |
2827 DESC_HDR_MODE0_MDEUB_SHA384,
2828 },
2829 { .type = CRYPTO_ALG_TYPE_AHASH,
2830 .alg.hash = {
2831 .halg.digestsize = SHA512_DIGEST_SIZE,
2832 .halg.statesize = sizeof(struct talitos_export_state),
2833 .halg.base = {
2834 .cra_name = "sha512",
2835 .cra_driver_name = "sha512-talitos",
2836 .cra_blocksize = SHA512_BLOCK_SIZE,
2837 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2838 CRYPTO_ALG_ASYNC,
2839 }
2840 },
2841 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2842 DESC_HDR_SEL0_MDEUB |
2843 DESC_HDR_MODE0_MDEUB_SHA512,
2844 },
2845 { .type = CRYPTO_ALG_TYPE_AHASH,
2846 .alg.hash = {
2847 .halg.digestsize = MD5_DIGEST_SIZE,
2848 .halg.statesize = sizeof(struct talitos_export_state),
2849 .halg.base = {
2850 .cra_name = "hmac(md5)",
2851 .cra_driver_name = "hmac-md5-talitos",
2852 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2853 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2854 CRYPTO_ALG_ASYNC,
2855 }
2856 },
2857 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2858 DESC_HDR_SEL0_MDEUA |
2859 DESC_HDR_MODE0_MDEU_MD5,
2860 },
2861 { .type = CRYPTO_ALG_TYPE_AHASH,
2862 .alg.hash = {
2863 .halg.digestsize = SHA1_DIGEST_SIZE,
2864 .halg.statesize = sizeof(struct talitos_export_state),
2865 .halg.base = {
2866 .cra_name = "hmac(sha1)",
2867 .cra_driver_name = "hmac-sha1-talitos",
2868 .cra_blocksize = SHA1_BLOCK_SIZE,
2869 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2870 CRYPTO_ALG_ASYNC,
2871 }
2872 },
2873 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2874 DESC_HDR_SEL0_MDEUA |
2875 DESC_HDR_MODE0_MDEU_SHA1,
2876 },
2877 { .type = CRYPTO_ALG_TYPE_AHASH,
2878 .alg.hash = {
2879 .halg.digestsize = SHA224_DIGEST_SIZE,
2880 .halg.statesize = sizeof(struct talitos_export_state),
2881 .halg.base = {
2882 .cra_name = "hmac(sha224)",
2883 .cra_driver_name = "hmac-sha224-talitos",
2884 .cra_blocksize = SHA224_BLOCK_SIZE,
2885 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2886 CRYPTO_ALG_ASYNC,
2887 }
2888 },
2889 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2890 DESC_HDR_SEL0_MDEUA |
2891 DESC_HDR_MODE0_MDEU_SHA224,
2892 },
2893 { .type = CRYPTO_ALG_TYPE_AHASH,
2894 .alg.hash = {
2895 .halg.digestsize = SHA256_DIGEST_SIZE,
2896 .halg.statesize = sizeof(struct talitos_export_state),
2897 .halg.base = {
2898 .cra_name = "hmac(sha256)",
2899 .cra_driver_name = "hmac-sha256-talitos",
2900 .cra_blocksize = SHA256_BLOCK_SIZE,
2901 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2902 CRYPTO_ALG_ASYNC,
2903 }
2904 },
2905 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2906 DESC_HDR_SEL0_MDEUA |
2907 DESC_HDR_MODE0_MDEU_SHA256,
2908 },
2909 { .type = CRYPTO_ALG_TYPE_AHASH,
2910 .alg.hash = {
2911 .halg.digestsize = SHA384_DIGEST_SIZE,
2912 .halg.statesize = sizeof(struct talitos_export_state),
2913 .halg.base = {
2914 .cra_name = "hmac(sha384)",
2915 .cra_driver_name = "hmac-sha384-talitos",
2916 .cra_blocksize = SHA384_BLOCK_SIZE,
2917 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2918 CRYPTO_ALG_ASYNC,
2919 }
2920 },
2921 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2922 DESC_HDR_SEL0_MDEUB |
2923 DESC_HDR_MODE0_MDEUB_SHA384,
2924 },
2925 { .type = CRYPTO_ALG_TYPE_AHASH,
2926 .alg.hash = {
2927 .halg.digestsize = SHA512_DIGEST_SIZE,
2928 .halg.statesize = sizeof(struct talitos_export_state),
2929 .halg.base = {
2930 .cra_name = "hmac(sha512)",
2931 .cra_driver_name = "hmac-sha512-talitos",
2932 .cra_blocksize = SHA512_BLOCK_SIZE,
2933 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2934 CRYPTO_ALG_ASYNC,
2935 }
2936 },
2937 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2938 DESC_HDR_SEL0_MDEUB |
2939 DESC_HDR_MODE0_MDEUB_SHA512,
2940 }
2941 };
2942
2943 struct talitos_crypto_alg {
2944 struct list_head entry;
2945 struct device *dev;
2946 struct talitos_alg_template algt;
2947 };
2948
2949 static int talitos_init_common(struct talitos_ctx *ctx,
2950 struct talitos_crypto_alg *talitos_alg)
2951 {
2952 struct talitos_private *priv;
2953
2954 /* update context with ptr to dev */
2955 ctx->dev = talitos_alg->dev;
2956
2957 /* assign SEC channel to tfm in round-robin fashion */
2958 priv = dev_get_drvdata(ctx->dev);
2959 ctx->ch = atomic_inc_return(&priv->last_chan) &
2960 (priv->num_channels - 1);
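	/* relies on num_channels being a power of 2, checked at probe time */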
2961
2962 /* copy descriptor header template value */
2963 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2964
2965 /* select done notification */
2966 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2967
2968 return 0;
2969 }
2970
2971 static int talitos_cra_init(struct crypto_tfm *tfm)
2972 {
2973 struct crypto_alg *alg = tfm->__crt_alg;
2974 struct talitos_crypto_alg *talitos_alg;
2975 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2976
2977 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2978 talitos_alg = container_of(__crypto_ahash_alg(alg),
2979 struct talitos_crypto_alg,
2980 algt.alg.hash);
2981 else
2982 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2983 algt.alg.crypto);
2984
2985 return talitos_init_common(ctx, talitos_alg);
2986 }
2987
2988 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2989 {
2990 struct aead_alg *alg = crypto_aead_alg(tfm);
2991 struct talitos_crypto_alg *talitos_alg;
2992 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2993
2994 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2995 algt.alg.aead);
2996
2997 return talitos_init_common(ctx, talitos_alg);
2998 }
2999
3000 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3001 {
3002 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3003
3004 talitos_cra_init(tfm);
3005
3006 ctx->keylen = 0;
3007 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3008 sizeof(struct talitos_ahash_req_ctx));
3009
3010 return 0;
3011 }
3012
3013 /*
3014 * given the alg's descriptor header template, determine whether descriptor
3015 * type and primary/secondary execution units required match the hw
3016 * capabilities description provided in the device tree node.
3017 */
3018 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3019 {
3020 struct talitos_private *priv = dev_get_drvdata(dev);
3021 int ret;
3022
3023 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3024 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3025
3026 if (SECONDARY_EU(desc_hdr_template))
3027 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3028 & priv->exec_units);
3029
3030 return ret;
3031 }
3032
3033 static int talitos_remove(struct platform_device *ofdev)
3034 {
3035 struct device *dev = &ofdev->dev;
3036 struct talitos_private *priv = dev_get_drvdata(dev);
3037 struct talitos_crypto_alg *t_alg, *n;
3038 int i;
3039
3040 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3041 switch (t_alg->algt.type) {
3042 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3043 break;
3044 case CRYPTO_ALG_TYPE_AEAD:
3045 crypto_unregister_aead(&t_alg->algt.alg.aead);
3046 break;
3047 case CRYPTO_ALG_TYPE_AHASH:
3048 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3049 break;
3050 }
3051 list_del(&t_alg->entry);
3052 kfree(t_alg);
3053 }
3054
3055 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3056 talitos_unregister_rng(dev);
3057
3058 for (i = 0; priv->chan && i < priv->num_channels; i++)
3059 kfree(priv->chan[i].fifo);
3060
3061 kfree(priv->chan);
3062
3063 for (i = 0; i < 2; i++)
3064 if (priv->irq[i]) {
3065 free_irq(priv->irq[i], dev);
3066 irq_dispose_mapping(priv->irq[i]);
3067 }
3068
3069 tasklet_kill(&priv->done_task[0]);
3070 if (priv->irq[1])
3071 tasklet_kill(&priv->done_task[1]);
3072
3073 iounmap(priv->reg);
3074
3075 kfree(priv);
3076
3077 return 0;
3078 }
3079
3080 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3081 struct talitos_alg_template
3082 *template)
3083 {
3084 struct talitos_private *priv = dev_get_drvdata(dev);
3085 struct talitos_crypto_alg *t_alg;
3086 struct crypto_alg *alg;
3087
3088 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
3089 if (!t_alg)
3090 return ERR_PTR(-ENOMEM);
3091
3092 t_alg->algt = *template;
3093
3094 switch (t_alg->algt.type) {
3095 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3096 alg = &t_alg->algt.alg.crypto;
3097 alg->cra_init = talitos_cra_init;
3098 alg->cra_type = &crypto_ablkcipher_type;
3099 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3100 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3101 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3102 alg->cra_ablkcipher.geniv = "eseqiv";
3103 break;
3104 case CRYPTO_ALG_TYPE_AEAD:
3105 alg = &t_alg->algt.alg.aead.base;
3106 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3107 t_alg->algt.alg.aead.setkey = aead_setkey;
3108 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3109 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3110 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3111 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3112 kfree(t_alg);
3113 return ERR_PTR(-ENOTSUPP);
3114 }
3115 break;
3116 case CRYPTO_ALG_TYPE_AHASH:
3117 alg = &t_alg->algt.alg.hash.halg.base;
3118 alg->cra_init = talitos_cra_init_ahash;
3119 alg->cra_type = &crypto_ahash_type;
3120 t_alg->algt.alg.hash.init = ahash_init;
3121 t_alg->algt.alg.hash.update = ahash_update;
3122 t_alg->algt.alg.hash.final = ahash_final;
3123 t_alg->algt.alg.hash.finup = ahash_finup;
3124 t_alg->algt.alg.hash.digest = ahash_digest;
3125 if (!strncmp(alg->cra_name, "hmac", 4))
3126 t_alg->algt.alg.hash.setkey = ahash_setkey;
3127 t_alg->algt.alg.hash.import = ahash_import;
3128 t_alg->algt.alg.hash.export = ahash_export;
3129
3130 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3131 !strncmp(alg->cra_name, "hmac", 4)) {
3132 kfree(t_alg);
3133 return ERR_PTR(-ENOTSUPP);
3134 }
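		/*
		 * Without h/w sha224 init support, substitute s/w
		 * initialization of the context with sha224 constants
		 * and run the MDEU in sha256 mode.
		 */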
3135 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3136 (!strcmp(alg->cra_name, "sha224") ||
3137 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3138 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3139 t_alg->algt.desc_hdr_template =
3140 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3141 DESC_HDR_SEL0_MDEUA |
3142 DESC_HDR_MODE0_MDEU_SHA256;
3143 }
3144 break;
3145 default:
3146 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3147 kfree(t_alg);
3148 return ERR_PTR(-EINVAL);
3149 }
3150
3151 alg->cra_module = THIS_MODULE;
3152 if (t_alg->algt.priority)
3153 alg->cra_priority = t_alg->algt.priority;
3154 else
3155 alg->cra_priority = TALITOS_CRA_PRIORITY;
3156 if (has_ftr_sec1(priv))
3157 alg->cra_alignmask = 3;
3158 else
3159 alg->cra_alignmask = 0;
3160 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3161 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3162
3163 t_alg->dev = dev;
3164
3165 return t_alg;
3166 }
3167
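/*
 * Map and request the interrupt line(s): SEC1 and single-IRQ SEC2
 * parts take one handler for all four channels; dual-IRQ parts split
 * channels 0/2 and 1/3 across the two lines.
 */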
3168 static int talitos_probe_irq(struct platform_device *ofdev)
3169 {
3170 struct device *dev = &ofdev->dev;
3171 struct device_node *np = ofdev->dev.of_node;
3172 struct talitos_private *priv = dev_get_drvdata(dev);
3173 int err;
3174 bool is_sec1 = has_ftr_sec1(priv);
3175
3176 priv->irq[0] = irq_of_parse_and_map(np, 0);
3177 if (!priv->irq[0]) {
3178 dev_err(dev, "failed to map irq\n");
3179 return -EINVAL;
3180 }
3181 if (is_sec1) {
3182 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3183 dev_driver_string(dev), dev);
3184 goto primary_out;
3185 }
3186
3187 priv->irq[1] = irq_of_parse_and_map(np, 1);
3188
3189 /* get the primary irq line */
3190 if (!priv->irq[1]) {
3191 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3192 dev_driver_string(dev), dev);
3193 goto primary_out;
3194 }
3195
3196 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3197 dev_driver_string(dev), dev);
3198 if (err)
3199 goto primary_out;
3200
3201 /* get the secondary irq line */
3202 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3203 dev_driver_string(dev), dev);
3204 if (err) {
3205 dev_err(dev, "failed to request secondary irq\n");
3206 irq_dispose_mapping(priv->irq[1]);
3207 priv->irq[1] = 0;
3208 }
3209
3210 return err;
3211
3212 primary_out:
3213 if (err) {
3214 dev_err(dev, "failed to request primary irq\n");
3215 irq_dispose_mapping(priv->irq[0]);
3216 priv->irq[0] = 0;
3217 }
3218
3219 return err;
3220 }
3221
3222 static int talitos_probe(struct platform_device *ofdev)
3223 {
3224 struct device *dev = &ofdev->dev;
3225 struct device_node *np = ofdev->dev.of_node;
3226 struct talitos_private *priv;
3227 const unsigned int *prop;
3228 int i, err;
3229 int stride;
3230
3231 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
3232 if (!priv)
3233 return -ENOMEM;
3234
3235 INIT_LIST_HEAD(&priv->alg_list);
3236
3237 dev_set_drvdata(dev, priv);
3238
3239 priv->ofdev = ofdev;
3240
3241 spin_lock_init(&priv->reg_lock);
3242
3243 priv->reg = of_iomap(np, 0);
3244 if (!priv->reg) {
3245 dev_err(dev, "failed to of_iomap\n");
3246 err = -ENOMEM;
3247 goto err_out;
3248 }
3249
3250 /* get SEC version capabilities from device tree */
3251 prop = of_get_property(np, "fsl,num-channels", NULL);
3252 if (prop)
3253 priv->num_channels = *prop;
3254
3255 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
3256 if (prop)
3257 priv->chfifo_len = *prop;
3258
3259 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
3260 if (prop)
3261 priv->exec_units = *prop;
3262
3263 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
3264 if (prop)
3265 priv->desc_types = *prop;
3266
3267 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3268 !priv->exec_units || !priv->desc_types) {
3269 dev_err(dev, "invalid property data in device tree node\n");
3270 err = -EINVAL;
3271 goto err_out;
3272 }
3273
3274 if (of_device_is_compatible(np, "fsl,sec3.0"))
3275 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3276
3277 if (of_device_is_compatible(np, "fsl,sec2.1"))
3278 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3279 TALITOS_FTR_SHA224_HWINIT |
3280 TALITOS_FTR_HMAC_OK;
3281
3282 if (of_device_is_compatible(np, "fsl,sec1.0"))
3283 priv->features |= TALITOS_FTR_SEC1;
3284
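	/* EU register offsets and channel stride vary by SEC version */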
3285 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3286 priv->reg_deu = priv->reg + TALITOS12_DEU;
3287 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3288 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3289 stride = TALITOS1_CH_STRIDE;
3290 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3291 priv->reg_deu = priv->reg + TALITOS10_DEU;
3292 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3293 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3294 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3295 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3296 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3297 stride = TALITOS1_CH_STRIDE;
3298 } else {
3299 priv->reg_deu = priv->reg + TALITOS2_DEU;
3300 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3301 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3302 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3303 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3304 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3305 priv->reg_keu = priv->reg + TALITOS2_KEU;
3306 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3307 stride = TALITOS2_CH_STRIDE;
3308 }
3309
3310 err = talitos_probe_irq(ofdev);
3311 if (err)
3312 goto err_out;
3313
3314 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3315 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3316 (unsigned long)dev);
3317 } else {
3318 if (!priv->irq[1]) {
3319 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3320 (unsigned long)dev);
3321 } else {
3322 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3323 (unsigned long)dev);
3324 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3325 (unsigned long)dev);
3326 }
3327 }
3328
3329 priv->chan = kcalloc(priv->num_channels,
3330 sizeof(struct talitos_channel), GFP_KERNEL);
3331 if (!priv->chan) {
3332 dev_err(dev, "failed to allocate channel management space\n");
3333 err = -ENOMEM;
3334 goto err_out;
3335 }
3336
3337 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3338
3339 for (i = 0; i < priv->num_channels; i++) {
3340 priv->chan[i].reg = priv->reg + stride * (i + 1);
3341 if (!priv->irq[1] || !(i & 1))
3342 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3343
3344 spin_lock_init(&priv->chan[i].head_lock);
3345 spin_lock_init(&priv->chan[i].tail_lock);
3346
3347 priv->chan[i].fifo = kcalloc(priv->fifo_len,
3348 sizeof(struct talitos_request), GFP_KERNEL);
3349 if (!priv->chan[i].fifo) {
3350 dev_err(dev, "failed to allocate request fifo %d\n", i);
3351 err = -ENOMEM;
3352 goto err_out;
3353 }
3354
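		/*
		 * Start submit_count at -(chfifo_len - 1): it reaches
		 * zero when one fifo slot remains, letting the submit
		 * path cheaply detect a full channel fifo.
		 */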
3355 atomic_set(&priv->chan[i].submit_count,
3356 -(priv->chfifo_len - 1));
3357 }
3358
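	/* match the 36-bit addressing of the descriptor ptr/eptr pair */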
3359 dma_set_mask(dev, DMA_BIT_MASK(36));
3360
3361 /* reset and initialize the h/w */
3362 err = init_device(dev);
3363 if (err) {
3364 dev_err(dev, "failed to initialize device\n");
3365 goto err_out;
3366 }
3367
3368 /* register the RNG, if available */
3369 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3370 err = talitos_register_rng(dev);
3371 if (err) {
3372 dev_err(dev, "failed to register hwrng: %d\n", err);
3373 goto err_out;
3374 } else
3375 dev_info(dev, "hwrng\n");
3376 }
3377
3378 /* register crypto algorithms the device supports */
3379 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3380 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3381 struct talitos_crypto_alg *t_alg;
3382 struct crypto_alg *alg = NULL;
3383
3384 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3385 if (IS_ERR(t_alg)) {
3386 err = PTR_ERR(t_alg);
3387 if (err == -ENOTSUPP)
3388 continue;
3389 goto err_out;
3390 }
3391
3392 switch (t_alg->algt.type) {
3393 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3394 err = crypto_register_alg(
3395 &t_alg->algt.alg.crypto);
3396 alg = &t_alg->algt.alg.crypto;
3397 break;
3398
3399 case CRYPTO_ALG_TYPE_AEAD:
3400 err = crypto_register_aead(
3401 &t_alg->algt.alg.aead);
3402 alg = &t_alg->algt.alg.aead.base;
3403 break;
3404
3405 case CRYPTO_ALG_TYPE_AHASH:
3406 err = crypto_register_ahash(
3407 &t_alg->algt.alg.hash);
3408 alg = &t_alg->algt.alg.hash.halg.base;
3409 break;
3410 }
3411 if (err) {
3412 dev_err(dev, "%s alg registration failed\n",
3413 alg->cra_driver_name);
3414 kfree(t_alg);
3415 } else
3416 list_add_tail(&t_alg->entry, &priv->alg_list);
3417 }
3418 }
3419 if (!list_empty(&priv->alg_list))
3420 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3421 (char *)of_get_property(np, "compatible", NULL));
3422
3423 return 0;
3424
3425 err_out:
3426 talitos_remove(ofdev);
3427
3428 return err;
3429 }
3430
3431 static const struct of_device_id talitos_match[] = {
3432 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3433 {
3434 .compatible = "fsl,sec1.0",
3435 },
3436 #endif
3437 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3438 {
3439 .compatible = "fsl,sec2.0",
3440 },
3441 #endif
3442 {},
3443 };
3444 MODULE_DEVICE_TABLE(of, talitos_match);
3445
3446 static struct platform_driver talitos_driver = {
3447 .driver = {
3448 .name = "talitos",
3449 .of_match_table = talitos_match,
3450 },
3451 .probe = talitos_probe,
3452 .remove = talitos_remove,
3453 };
3454
3455 module_platform_driver(talitos_driver);
3456
3457 MODULE_LICENSE("GPL");
3458 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3459 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3460