1 /*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * ---------------                     ---------------
11 * | JobDesc #1  |-------------------->|  ShareDesc  |
12 * | *(packet 1) |                     |   (PDB)     |
13 * ---------------      |------------->|  (hashKey)  |
14 *       .              |              | (cipherKey) |
15 *       .              |    |-------->| (operation) |
16 * ---------------      |    |         ---------------
17 * | JobDesc #2  |------|    |
18 * | *(packet 2) |           |
19 * ---------------           |
20 *       .                   |
21 *       .                   |
22 * ---------------           |
23 * | JobDesc #3  |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header            |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR       |
39 * | (output buffer)   |
40 * | (output length)   |
41 * | SEQ_IN_PTR        |
42 * | (input buffer)    |
43 * | (input length)    |
44 * ---------------------
45 */
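/*
 * Concretely, for an AEAD request the sequence pointers programmed by
 * init_aead_job() below cover:
 *
 *   SEQ_IN_PTR  length = assoclen + ivsize + cryptlen
 *   SEQ_OUT_PTR length = cryptlen + authsize   (encrypt)
 *                        cryptlen - authsize   (decrypt)
 */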
46
47 #include "compat.h"
48
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
56
57 /*
58 * crypto alg
59 */
60 #define CAAM_CRA_PRIORITY 3000
61 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
63 SHA512_DIGEST_SIZE * 2)
64 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65 #define CAAM_MAX_IV_LENGTH 16
66
67 /* length of descriptors text */
68 #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
69 #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
70 #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
71 #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72
73 #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
74 #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
75 #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
76
77 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
78 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
79 20 * CAAM_CMD_SZ)
80 #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
81 15 * CAAM_CMD_SZ)
82
83 #define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
84 CAAM_MAX_KEY_SIZE)
85 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
86
87 #ifdef DEBUG
88 /* for print_hex_dumps with line references */
89 #define debug(format, arg...) printk(format, arg)
90 #else
91 #define debug(format, arg...)
92 #endif
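/* list of algorithms registered with the crypto API by this driver */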
93 static struct list_head alg_list;
94
95 /* Set DK bit in class 1 operation if shared */
96 static inline void append_dec_op1(u32 *desc, u32 type)
97 {
98 u32 *jump_cmd, *uncond_jump_cmd;
99
100 /* DK bit is valid only for AES */
101 if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
102 append_operation(desc, type | OP_ALG_AS_INITFINAL |
103 OP_ALG_DECRYPT);
104 return;
105 }
106
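	/*
	 * For AES, select the Decrypt Key (DK) form of the operation when the
	 * descriptor is entered in the shared state; otherwise use the plain
	 * decrypt operation.
	 */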
107 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
108 append_operation(desc, type | OP_ALG_AS_INITFINAL |
109 OP_ALG_DECRYPT);
110 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
111 set_jump_tgt_here(desc, jump_cmd);
112 append_operation(desc, type | OP_ALG_AS_INITFINAL |
113 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
114 set_jump_tgt_here(desc, uncond_jump_cmd);
115 }
116
117 /*
118 * For aead functions, read payload and write payload,
119 * both of which are specified in req->src and req->dst
120 */
121 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
122 {
123 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
124 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
125 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
126 }
127
128 /*
129 * For aead encrypt and decrypt, read iv for both classes
130 */
131 static inline void aead_append_ld_iv(u32 *desc, int ivsize)
132 {
133 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
134 LDST_CLASS_1_CCB | ivsize);
135 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
136 }
137
138 /*
139 * For ablkcipher encrypt and decrypt, read from req->src and
140 * write to req->dst
141 */
142 static inline void ablkcipher_append_src_dst(u32 *desc)
143 {
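	/* Read and write the entire remaining sequence length */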
144 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
145 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
146 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
147 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
148 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
149 }
150
151 /*
152 * If all data, including src (with assoc and iv) or dst (with iv only) are
153 * contiguous
154 */
155 #define GIV_SRC_CONTIG 1
156 #define GIV_DST_CONTIG (1 << 1)
157
158 /*
159 * per-session context
160 */
161 struct caam_ctx {
162 struct device *jrdev;
163 u32 sh_desc_enc[DESC_MAX_USED_LEN];
164 u32 sh_desc_dec[DESC_MAX_USED_LEN];
165 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
166 dma_addr_t sh_desc_enc_dma;
167 dma_addr_t sh_desc_dec_dma;
168 dma_addr_t sh_desc_givenc_dma;
169 u32 class1_alg_type;
170 u32 class2_alg_type;
171 u32 alg_op;
172 u8 key[CAAM_MAX_KEY_SIZE];
173 dma_addr_t key_dma;
174 unsigned int enckeylen;
175 unsigned int split_key_len;
176 unsigned int split_key_pad_len;
177 unsigned int authsize;
178 };
179
180 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
181 int keys_fit_inline)
182 {
183 if (keys_fit_inline) {
184 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
185 ctx->split_key_len, CLASS_2 |
186 KEY_DEST_MDHA_SPLIT | KEY_ENC);
187 append_key_as_imm(desc, (void *)ctx->key +
188 ctx->split_key_pad_len, ctx->enckeylen,
189 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
190 } else {
191 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
192 KEY_DEST_MDHA_SPLIT | KEY_ENC);
193 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
194 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
195 }
196 }
197
198 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
199 int keys_fit_inline)
200 {
201 u32 *key_jump_cmd;
202
203 init_sh_desc(desc, HDR_SHARE_SERIAL);
204
205 /* Skip if already shared */
206 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
207 JUMP_COND_SHRD);
208
209 append_key_aead(desc, ctx, keys_fit_inline);
210
211 set_jump_tgt_here(desc, key_jump_cmd);
212 }
213
214 static int aead_null_set_sh_desc(struct crypto_aead *aead)
215 {
216 struct aead_tfm *tfm = &aead->base.crt_aead;
217 struct caam_ctx *ctx = crypto_aead_ctx(aead);
218 struct device *jrdev = ctx->jrdev;
219 bool keys_fit_inline = false;
220 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
221 u32 *desc;
222
223 /*
224 * Job Descriptor and Shared Descriptors
225 * must all fit into the 64-word Descriptor h/w Buffer
226 */
227 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
228 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
229 keys_fit_inline = true;
230
231 /* aead_encrypt shared descriptor */
232 desc = ctx->sh_desc_enc;
233
234 init_sh_desc(desc, HDR_SHARE_SERIAL);
235
236 /* Skip if already shared */
237 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
238 JUMP_COND_SHRD);
239 if (keys_fit_inline)
240 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
241 ctx->split_key_len, CLASS_2 |
242 KEY_DEST_MDHA_SPLIT | KEY_ENC);
243 else
244 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
245 KEY_DEST_MDHA_SPLIT | KEY_ENC);
246 set_jump_tgt_here(desc, key_jump_cmd);
247
248 /* cryptlen = seqoutlen - authsize */
249 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
250
251 /*
252 * NULL encryption; IV is zero
253 * assoclen = (assoclen + cryptlen) - cryptlen
254 */
255 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
256
257 /* read assoc before reading payload */
258 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
259 KEY_VLF);
260
261 /* Prepare to read and write cryptlen bytes */
262 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
263 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
264
265 /*
266 * MOVE_LEN opcode is not available in all SEC HW revisions,
267 * thus need to do some magic, i.e. self-patch the descriptor
268 * buffer.
269 */
270 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
271 MOVE_DEST_MATH3 |
272 (0x6 << MOVE_LEN_SHIFT));
273 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
274 MOVE_DEST_DESCBUF |
275 MOVE_WAITCOMP |
276 (0x8 << MOVE_LEN_SHIFT));
277
278 /* Class 2 operation */
279 append_operation(desc, ctx->class2_alg_type |
280 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
281
282 /* Read and write cryptlen bytes */
283 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
284
285 set_move_tgt_here(desc, read_move_cmd);
286 set_move_tgt_here(desc, write_move_cmd);
287 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
288 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
289 MOVE_AUX_LS);
290
291 /* Write ICV */
292 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
293 LDST_SRCDST_BYTE_CONTEXT);
294
295 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
296 desc_bytes(desc),
297 DMA_TO_DEVICE);
298 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
299 dev_err(jrdev, "unable to map shared descriptor\n");
300 return -ENOMEM;
301 }
302 #ifdef DEBUG
303 print_hex_dump(KERN_ERR,
304 "aead null enc shdesc@"__stringify(__LINE__)": ",
305 DUMP_PREFIX_ADDRESS, 16, 4, desc,
306 desc_bytes(desc), 1);
307 #endif
308
309 /*
310 * Job Descriptor and Shared Descriptors
311 * must all fit into the 64-word Descriptor h/w Buffer
312 */
313 keys_fit_inline = false;
314 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
315 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
316 keys_fit_inline = true;
317
318 desc = ctx->sh_desc_dec;
319
320 /* aead_decrypt shared descriptor */
321 init_sh_desc(desc, HDR_SHARE_SERIAL);
322
323 /* Skip if already shared */
324 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
325 JUMP_COND_SHRD);
326 if (keys_fit_inline)
327 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
328 ctx->split_key_len, CLASS_2 |
329 KEY_DEST_MDHA_SPLIT | KEY_ENC);
330 else
331 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
332 KEY_DEST_MDHA_SPLIT | KEY_ENC);
333 set_jump_tgt_here(desc, key_jump_cmd);
334
335 /* Class 2 operation */
336 append_operation(desc, ctx->class2_alg_type |
337 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
338
339 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
340 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
341 ctx->authsize + tfm->ivsize);
342 /* assoclen = (assoclen + cryptlen) - cryptlen */
343 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
344 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
345
346 /* read assoc before reading payload */
347 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
348 KEY_VLF);
349
350 /* Prepare to read and write cryptlen bytes */
351 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
352 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
353
354 /*
355 * MOVE_LEN opcode is not available in all SEC HW revisions,
356 * thus need to do some magic, i.e. self-patch the descriptor
357 * buffer.
358 */
359 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
360 MOVE_DEST_MATH2 |
361 (0x6 << MOVE_LEN_SHIFT));
362 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
363 MOVE_DEST_DESCBUF |
364 MOVE_WAITCOMP |
365 (0x8 << MOVE_LEN_SHIFT));
366
367 /* Read and write cryptlen bytes */
368 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
369
370 /*
371 * Insert a NOP here, since we need at least 4 instructions between
372 * code patching the descriptor buffer and the location being patched.
373 */
374 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
375 set_jump_tgt_here(desc, jump_cmd);
376
377 set_move_tgt_here(desc, read_move_cmd);
378 set_move_tgt_here(desc, write_move_cmd);
379 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
380 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
381 MOVE_AUX_LS);
382 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
383
384 /* Load ICV */
385 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
386 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
387
388 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
389 desc_bytes(desc),
390 DMA_TO_DEVICE);
391 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
392 dev_err(jrdev, "unable to map shared descriptor\n");
393 return -ENOMEM;
394 }
395 #ifdef DEBUG
396 print_hex_dump(KERN_ERR,
397 "aead null dec shdesc@"__stringify(__LINE__)": ",
398 DUMP_PREFIX_ADDRESS, 16, 4, desc,
399 desc_bytes(desc), 1);
400 #endif
401
402 return 0;
403 }
404
405 static int aead_set_sh_desc(struct crypto_aead *aead)
406 {
407 struct aead_tfm *tfm = &aead->base.crt_aead;
408 struct caam_ctx *ctx = crypto_aead_ctx(aead);
409 struct device *jrdev = ctx->jrdev;
410 bool keys_fit_inline = false;
411 u32 geniv, moveiv;
412 u32 *desc;
413
414 if (!ctx->authsize)
415 return 0;
416
417 /* NULL encryption / decryption */
418 if (!ctx->enckeylen)
419 return aead_null_set_sh_desc(aead);
420
421 /*
422 * Job Descriptor and Shared Descriptors
423 * must all fit into the 64-word Descriptor h/w Buffer
424 */
425 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
426 ctx->split_key_pad_len + ctx->enckeylen <=
427 CAAM_DESC_BYTES_MAX)
428 keys_fit_inline = true;
429
430 /* aead_encrypt shared descriptor */
431 desc = ctx->sh_desc_enc;
432
433 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
434
435 /* Class 2 operation */
436 append_operation(desc, ctx->class2_alg_type |
437 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
438
439 /* cryptlen = seqoutlen - authsize */
440 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
441
442 /* assoclen + cryptlen = seqinlen - ivsize */
443 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
444
445 /* assoclen = (assoclen + cryptlen) - cryptlen */
446 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
447
448 /* read assoc before reading payload */
449 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
450 KEY_VLF);
451 aead_append_ld_iv(desc, tfm->ivsize);
452
453 /* Class 1 operation */
454 append_operation(desc, ctx->class1_alg_type |
455 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
456
457 /* Read and write cryptlen bytes */
458 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
459 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
460 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
461
462 /* Write ICV */
463 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
464 LDST_SRCDST_BYTE_CONTEXT);
465
466 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
467 desc_bytes(desc),
468 DMA_TO_DEVICE);
469 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
470 dev_err(jrdev, "unable to map shared descriptor\n");
471 return -ENOMEM;
472 }
473 #ifdef DEBUG
474 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
475 DUMP_PREFIX_ADDRESS, 16, 4, desc,
476 desc_bytes(desc), 1);
477 #endif
478
479 /*
480 * Job Descriptor and Shared Descriptors
481 * must all fit into the 64-word Descriptor h/w Buffer
482 */
483 keys_fit_inline = false;
484 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
485 ctx->split_key_pad_len + ctx->enckeylen <=
486 CAAM_DESC_BYTES_MAX)
487 keys_fit_inline = true;
488
489 /* aead_decrypt shared descriptor */
490 desc = ctx->sh_desc_dec;
491
492 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
493
494 /* Class 2 operation */
495 append_operation(desc, ctx->class2_alg_type |
496 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
497
498 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
499 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
500 ctx->authsize + tfm->ivsize);
501 /* assoclen = (assoclen + cryptlen) - cryptlen */
502 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
503 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
504
505 /* read assoc before reading payload */
506 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
507 KEY_VLF);
508
509 aead_append_ld_iv(desc, tfm->ivsize);
510
511 append_dec_op1(desc, ctx->class1_alg_type);
512
513 /* Read and write cryptlen bytes */
514 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
515 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
516 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
517
518 /* Load ICV */
519 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
520 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
521
522 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
523 desc_bytes(desc),
524 DMA_TO_DEVICE);
525 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
526 dev_err(jrdev, "unable to map shared descriptor\n");
527 return -ENOMEM;
528 }
529 #ifdef DEBUG
530 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
531 DUMP_PREFIX_ADDRESS, 16, 4, desc,
532 desc_bytes(desc), 1);
533 #endif
534
535 /*
536 * Job Descriptor and Shared Descriptors
537 * must all fit into the 64-word Descriptor h/w Buffer
538 */
539 keys_fit_inline = false;
540 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
541 ctx->split_key_pad_len + ctx->enckeylen <=
542 CAAM_DESC_BYTES_MAX)
543 keys_fit_inline = true;
544
545 /* aead_givencrypt shared descriptor */
546 desc = ctx->sh_desc_givenc;
547
548 init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
549
550 /* Generate IV */
551 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
552 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
553 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
554 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
555 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
556 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
557 append_move(desc, MOVE_SRC_INFIFO |
558 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
559 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
560
561 /* Copy generated IV from class 1 context to the output FIFO */
562 append_move(desc, MOVE_SRC_CLASS1CTX |
563 MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
564
565 /* Return to encryption */
566 append_operation(desc, ctx->class2_alg_type |
567 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
568
569 /* ivsize + cryptlen = seqoutlen - authsize */
570 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
571
572 /* assoclen = seqinlen - (ivsize + cryptlen) */
573 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
574
575 /* read assoc before reading payload */
576 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
577 KEY_VLF);
578
579 /* Copy iv from class 1 ctx to class 2 fifo */
580 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
581 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
582 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
583 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
584 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
585 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
586
587 /* Class 1 operation */
588 append_operation(desc, ctx->class1_alg_type |
589 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
590
591 /* Will write ivsize + cryptlen */
592 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
593
594 /* No need to reload iv */
595 append_seq_fifo_load(desc, tfm->ivsize,
596 FIFOLD_CLASS_SKIP);
597
598 /* Will read cryptlen */
599 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
600 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
601 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
602 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
603
604 /* Write ICV */
605 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
606 LDST_SRCDST_BYTE_CONTEXT);
607
608 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
609 desc_bytes(desc),
610 DMA_TO_DEVICE);
611 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
612 dev_err(jrdev, "unable to map shared descriptor\n");
613 return -ENOMEM;
614 }
615 #ifdef DEBUG
616 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
617 DUMP_PREFIX_ADDRESS, 16, 4, desc,
618 desc_bytes(desc), 1);
619 #endif
620
621 return 0;
622 }
623
624 static int aead_setauthsize(struct crypto_aead *authenc,
625 unsigned int authsize)
626 {
627 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
628
629 ctx->authsize = authsize;
630 aead_set_sh_desc(authenc);
631
632 return 0;
633 }
634
635 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
636 u32 authkeylen)
637 {
638 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
639 ctx->split_key_pad_len, key_in, authkeylen,
640 ctx->alg_op);
641 }
642
643 static int aead_setkey(struct crypto_aead *aead,
644 const u8 *key, unsigned int keylen)
645 {
646 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
647 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
648 struct caam_ctx *ctx = crypto_aead_ctx(aead);
649 struct device *jrdev = ctx->jrdev;
650 struct crypto_authenc_keys keys;
651 int ret = 0;
652
653 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
654 goto badkey;
655
656 /* Pick class 2 key length from algorithm submask */
657 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
658 OP_ALG_ALGSEL_SHIFT] * 2;
659 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
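	/*
	 * The split key is the pair of inner/outer hash states derived from
	 * the authentication key (hence the "* 2" above), rounded up to a
	 * 16-byte boundary.
	 */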
660
661 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
662 goto badkey;
663
664 #ifdef DEBUG
665 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
666 keys.authkeylen + keys.enckeylen, keys.enckeylen,
667 keys.authkeylen);
668 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
669 ctx->split_key_len, ctx->split_key_pad_len);
670 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
671 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
672 #endif
673
674 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
675 if (ret) {
676 goto badkey;
677 }
678
679 /* append encryption key after the auth split key */
680 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
681
682 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
683 keys.enckeylen, DMA_TO_DEVICE);
684 if (dma_mapping_error(jrdev, ctx->key_dma)) {
685 dev_err(jrdev, "unable to map key i/o memory\n");
686 return -ENOMEM;
687 }
688 #ifdef DEBUG
689 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
690 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
691 ctx->split_key_pad_len + keys.enckeylen, 1);
692 #endif
693
694 ctx->enckeylen = keys.enckeylen;
695
696 ret = aead_set_sh_desc(aead);
697 if (ret) {
698 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
699 keys.enckeylen, DMA_TO_DEVICE);
700 }
701
702 return ret;
703 badkey:
704 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
705 return -EINVAL;
706 }
707
708 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
709 const u8 *key, unsigned int keylen)
710 {
711 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
712 struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
713 struct device *jrdev = ctx->jrdev;
714 int ret = 0;
715 u32 *key_jump_cmd;
716 u32 *desc;
717
718 #ifdef DEBUG
719 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
720 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
721 #endif
722
723 memcpy(ctx->key, key, keylen);
724 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
725 DMA_TO_DEVICE);
726 if (dma_mapping_error(jrdev, ctx->key_dma)) {
727 dev_err(jrdev, "unable to map key i/o memory\n");
728 return -ENOMEM;
729 }
730 ctx->enckeylen = keylen;
731
732 /* ablkcipher_encrypt shared descriptor */
733 desc = ctx->sh_desc_enc;
734 init_sh_desc(desc, HDR_SHARE_SERIAL);
735 /* Skip if already shared */
736 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
737 JUMP_COND_SHRD);
738
739 /* Load class1 key only */
740 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
741 ctx->enckeylen, CLASS_1 |
742 KEY_DEST_CLASS_REG);
743
744 set_jump_tgt_here(desc, key_jump_cmd);
745
746 /* Load iv */
747 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
748 LDST_CLASS_1_CCB | tfm->ivsize);
749
750 /* Load operation */
751 append_operation(desc, ctx->class1_alg_type |
752 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
753
754 /* Perform operation */
755 ablkcipher_append_src_dst(desc);
756
757 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
758 desc_bytes(desc),
759 DMA_TO_DEVICE);
760 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
761 dev_err(jrdev, "unable to map shared descriptor\n");
762 return -ENOMEM;
763 }
764 #ifdef DEBUG
765 print_hex_dump(KERN_ERR,
766 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
767 DUMP_PREFIX_ADDRESS, 16, 4, desc,
768 desc_bytes(desc), 1);
769 #endif
770 /* ablkcipher_decrypt shared descriptor */
771 desc = ctx->sh_desc_dec;
772
773 init_sh_desc(desc, HDR_SHARE_SERIAL);
774 /* Skip if already shared */
775 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
776 JUMP_COND_SHRD);
777
778 /* Load class1 key only */
779 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
780 ctx->enckeylen, CLASS_1 |
781 KEY_DEST_CLASS_REG);
782
783 set_jump_tgt_here(desc, key_jump_cmd);
784
785 /* load IV */
786 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
787 LDST_CLASS_1_CCB | tfm->ivsize);
788
789 /* Choose operation */
790 append_dec_op1(desc, ctx->class1_alg_type);
791
792 /* Perform operation */
793 ablkcipher_append_src_dst(desc);
794
795 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
796 desc_bytes(desc),
797 DMA_TO_DEVICE);
798 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
799 dev_err(jrdev, "unable to map shared descriptor\n");
800 return -ENOMEM;
801 }
802
803 #ifdef DEBUG
804 print_hex_dump(KERN_ERR,
805 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
806 DUMP_PREFIX_ADDRESS, 16, 4, desc,
807 desc_bytes(desc), 1);
808 #endif
809
810 return ret;
811 }
812
813 /*
814 * aead_edesc - s/w-extended aead descriptor
815 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
816 * @assoc_chained: if associated data is chained
817 * @src_nents: number of segments in input scatterlist
818 * @src_chained: if source is chained
819 * @dst_nents: number of segments in output scatterlist
820 * @dst_chained: if destination is chained
821 * @iv_dma: dma address of iv for checking continuity and link table
822 * @sec4_sg: pointer to h/w link table
823 * @sec4_sg_bytes: length of dma mapped sec4_sg space
824 * @sec4_sg_dma: bus physical mapped address of h/w link table
825 * @hw_desc: the h/w job descriptor followed by any referenced link tables
826 */
827 struct aead_edesc {
828 int assoc_nents;
829 bool assoc_chained;
830 int src_nents;
831 bool src_chained;
832 int dst_nents;
833 bool dst_chained;
834 dma_addr_t iv_dma;
835 int sec4_sg_bytes;
836 dma_addr_t sec4_sg_dma;
837 struct sec4_sg_entry *sec4_sg;
838 u32 hw_desc[0];
839 };
840
841 /*
842 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
843 * @src_nents: number of segments in input scatterlist
844 * @src_chained: if source is chained
845 * @dst_nents: number of segments in output scatterlist
846 * @dst_chained: if destination is chained
847 * @iv_dma: dma address of iv for checking continuity and link table
848 * @sec4_sg: pointer to h/w link table
849 * @sec4_sg_bytes: length of dma mapped sec4_sg space
850 * @sec4_sg_dma: bus physical mapped address of h/w link table
851 * @hw_desc: the h/w job descriptor followed by any referenced link tables
852 */
853 struct ablkcipher_edesc {
854 int src_nents;
855 bool src_chained;
856 int dst_nents;
857 bool dst_chained;
858 dma_addr_t iv_dma;
859 int sec4_sg_bytes;
860 dma_addr_t sec4_sg_dma;
861 struct sec4_sg_entry *sec4_sg;
862 u32 hw_desc[0];
863 };
864
865 static void caam_unmap(struct device *dev, struct scatterlist *src,
866 struct scatterlist *dst, int src_nents,
867 bool src_chained, int dst_nents, bool dst_chained,
868 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
869 int sec4_sg_bytes)
870 {
871 if (dst != src) {
872 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
873 src_chained);
874 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
875 dst_chained);
876 } else {
877 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
878 DMA_BIDIRECTIONAL, src_chained);
879 }
880
881 if (iv_dma)
882 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
883 if (sec4_sg_bytes)
884 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
885 DMA_TO_DEVICE);
886 }
887
888 static void aead_unmap(struct device *dev,
889 struct aead_edesc *edesc,
890 struct aead_request *req)
891 {
892 struct crypto_aead *aead = crypto_aead_reqtfm(req);
893 int ivsize = crypto_aead_ivsize(aead);
894
895 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
896 DMA_TO_DEVICE, edesc->assoc_chained);
897
898 caam_unmap(dev, req->src, req->dst,
899 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
900 edesc->dst_chained, edesc->iv_dma, ivsize,
901 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
902 }
903
904 static void ablkcipher_unmap(struct device *dev,
905 struct ablkcipher_edesc *edesc,
906 struct ablkcipher_request *req)
907 {
908 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
909 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
910
911 caam_unmap(dev, req->src, req->dst,
912 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
913 edesc->dst_chained, edesc->iv_dma, ivsize,
914 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
915 }
916
917 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
918 void *context)
919 {
920 struct aead_request *req = context;
921 struct aead_edesc *edesc;
922 #ifdef DEBUG
923 struct crypto_aead *aead = crypto_aead_reqtfm(req);
924 struct caam_ctx *ctx = crypto_aead_ctx(aead);
925 int ivsize = crypto_aead_ivsize(aead);
926
927 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
928 #endif
929
930 edesc = (struct aead_edesc *)((char *)desc -
931 offsetof(struct aead_edesc, hw_desc));
932
933 if (err)
934 caam_jr_strstatus(jrdev, err);
935
936 aead_unmap(jrdev, edesc, req);
937
938 #ifdef DEBUG
939 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
940 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
941 req->assoclen , 1);
942 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
943 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
944 edesc->src_nents ? 100 : ivsize, 1);
945 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
946 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
947 edesc->src_nents ? 100 : req->cryptlen +
948 ctx->authsize + 4, 1);
949 #endif
950
951 kfree(edesc);
952
953 aead_request_complete(req, err);
954 }
955
956 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
957 void *context)
958 {
959 struct aead_request *req = context;
960 struct aead_edesc *edesc;
961 #ifdef DEBUG
962 struct crypto_aead *aead = crypto_aead_reqtfm(req);
963 struct caam_ctx *ctx = crypto_aead_ctx(aead);
964 int ivsize = crypto_aead_ivsize(aead);
965
966 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
967 #endif
968
969 edesc = (struct aead_edesc *)((char *)desc -
970 offsetof(struct aead_edesc, hw_desc));
971
972 #ifdef DEBUG
973 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
974 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
975 ivsize, 1);
976 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
977 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
978 req->cryptlen - ctx->authsize, 1);
979 #endif
980
981 if (err)
982 caam_jr_strstatus(jrdev, err);
983
984 aead_unmap(jrdev, edesc, req);
985
986 /*
987 * verify hw auth check passed else return -EBADMSG
988 */
989 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
990 err = -EBADMSG;
991
992 #ifdef DEBUG
993 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
994 DUMP_PREFIX_ADDRESS, 16, 4,
995 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
996 sizeof(struct iphdr) + req->assoclen +
997 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
998 ctx->authsize + 36, 1);
999 if (!err && edesc->sec4_sg_bytes) {
1000 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
1001 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
1002 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1003 sg->length + ctx->authsize + 16, 1);
1004 }
1005 #endif
1006
1007 kfree(edesc);
1008
1009 aead_request_complete(req, err);
1010 }
1011
1012 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1013 void *context)
1014 {
1015 struct ablkcipher_request *req = context;
1016 struct ablkcipher_edesc *edesc;
1017 #ifdef DEBUG
1018 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1019 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1020
1021 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1022 #endif
1023
1024 edesc = (struct ablkcipher_edesc *)((char *)desc -
1025 offsetof(struct ablkcipher_edesc, hw_desc));
1026
1027 if (err)
1028 caam_jr_strstatus(jrdev, err);
1029
1030 #ifdef DEBUG
1031 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1032 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1033 edesc->src_nents > 1 ? 100 : ivsize, 1);
1034 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1035 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1036 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1037 #endif
1038
1039 ablkcipher_unmap(jrdev, edesc, req);
1040 kfree(edesc);
1041
1042 ablkcipher_request_complete(req, err);
1043 }
1044
1045 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1046 void *context)
1047 {
1048 struct ablkcipher_request *req = context;
1049 struct ablkcipher_edesc *edesc;
1050 #ifdef DEBUG
1051 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1052 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1053
1054 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1055 #endif
1056
1057 edesc = (struct ablkcipher_edesc *)((char *)desc -
1058 offsetof(struct ablkcipher_edesc, hw_desc));
1059 if (err)
1060 caam_jr_strstatus(jrdev, err);
1061
1062 #ifdef DEBUG
1063 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1064 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1065 ivsize, 1);
1066 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1067 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1068 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1069 #endif
1070
1071 ablkcipher_unmap(jrdev, edesc, req);
1072 kfree(edesc);
1073
1074 ablkcipher_request_complete(req, err);
1075 }
1076
1077 /*
1078 * Fill in aead job descriptor
1079 */
1080 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1081 struct aead_edesc *edesc,
1082 struct aead_request *req,
1083 bool all_contig, bool encrypt)
1084 {
1085 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1086 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1087 int ivsize = crypto_aead_ivsize(aead);
1088 int authsize = ctx->authsize;
1089 u32 *desc = edesc->hw_desc;
1090 u32 out_options = 0, in_options;
1091 dma_addr_t dst_dma, src_dma;
1092 int len, sec4_sg_index = 0;
1093
1094 #ifdef DEBUG
1095 debug("assoclen %d cryptlen %d authsize %d\n",
1096 req->assoclen, req->cryptlen, authsize);
1097 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
1098 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1099 req->assoclen , 1);
1100 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1101 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1102 edesc->src_nents ? 100 : ivsize, 1);
1103 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1104 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1105 edesc->src_nents ? 100 : req->cryptlen, 1);
1106 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1107 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1108 desc_bytes(sh_desc), 1);
1109 #endif
1110
1111 len = desc_len(sh_desc);
1112 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1113
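	/*
	 * The input sequence is assoc data, then IV, then payload.  When
	 * everything is physically contiguous we point directly at the assoc
	 * scatterlist; otherwise the sec4 link table built by
	 * aead_edesc_alloc() is used.
	 */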
1114 if (all_contig) {
1115 src_dma = sg_dma_address(req->assoc);
1116 in_options = 0;
1117 } else {
1118 src_dma = edesc->sec4_sg_dma;
1119 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1120 (edesc->src_nents ? : 1);
1121 in_options = LDST_SGF;
1122 }
1123
1124 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1125 in_options);
1126
1127 if (likely(req->src == req->dst)) {
1128 if (all_contig) {
1129 dst_dma = sg_dma_address(req->src);
1130 } else {
1131 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1132 ((edesc->assoc_nents ? : 1) + 1);
1133 out_options = LDST_SGF;
1134 }
1135 } else {
1136 if (!edesc->dst_nents) {
1137 dst_dma = sg_dma_address(req->dst);
1138 } else {
1139 dst_dma = edesc->sec4_sg_dma +
1140 sec4_sg_index *
1141 sizeof(struct sec4_sg_entry);
1142 out_options = LDST_SGF;
1143 }
1144 }
1145 if (encrypt)
1146 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
1147 out_options);
1148 else
1149 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1150 out_options);
1151 }
1152
1153 /*
1154 * Fill in aead givencrypt job descriptor
1155 */
1156 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1157 struct aead_edesc *edesc,
1158 struct aead_request *req,
1159 int contig)
1160 {
1161 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1162 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1163 int ivsize = crypto_aead_ivsize(aead);
1164 int authsize = ctx->authsize;
1165 u32 *desc = edesc->hw_desc;
1166 u32 out_options = 0, in_options;
1167 dma_addr_t dst_dma, src_dma;
1168 int len, sec4_sg_index = 0;
1169
1170 #ifdef DEBUG
1171 debug("assoclen %d cryptlen %d authsize %d\n",
1172 req->assoclen, req->cryptlen, authsize);
1173 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
1174 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1175 req->assoclen , 1);
1176 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1177 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1178 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1179 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1180 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1181 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1182 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1183 desc_bytes(sh_desc), 1);
1184 #endif
1185
1186 len = desc_len(sh_desc);
1187 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1188
1189 if (contig & GIV_SRC_CONTIG) {
1190 src_dma = sg_dma_address(req->assoc);
1191 in_options = 0;
1192 } else {
1193 src_dma = edesc->sec4_sg_dma;
1194 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1195 in_options = LDST_SGF;
1196 }
1197 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1198 in_options);
1199
1200 if (contig & GIV_DST_CONTIG) {
1201 dst_dma = edesc->iv_dma;
1202 } else {
1203 if (likely(req->src == req->dst)) {
1204 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1205 edesc->assoc_nents;
1206 out_options = LDST_SGF;
1207 } else {
1208 dst_dma = edesc->sec4_sg_dma +
1209 sec4_sg_index *
1210 sizeof(struct sec4_sg_entry);
1211 out_options = LDST_SGF;
1212 }
1213 }
1214
1215 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1216 out_options);
1217 }
1218
1219 /*
1220 * Fill in ablkcipher job descriptor
1221 */
1222 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1223 struct ablkcipher_edesc *edesc,
1224 struct ablkcipher_request *req,
1225 bool iv_contig)
1226 {
1227 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1228 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1229 u32 *desc = edesc->hw_desc;
1230 u32 out_options = 0, in_options;
1231 dma_addr_t dst_dma, src_dma;
1232 int len, sec4_sg_index = 0;
1233
1234 #ifdef DEBUG
1235 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1236 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1237 ivsize, 1);
1238 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1239 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1240 edesc->src_nents ? 100 : req->nbytes, 1);
1241 #endif
1242
1243 len = desc_len(sh_desc);
1244 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1245
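	/*
	 * The input sequence is the IV followed by the data; if they are not
	 * physically contiguous, go through the sec4 link table instead.
	 */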
1246 if (iv_contig) {
1247 src_dma = edesc->iv_dma;
1248 in_options = 0;
1249 } else {
1250 src_dma = edesc->sec4_sg_dma;
1251 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1252 in_options = LDST_SGF;
1253 }
1254 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1255
1256 if (likely(req->src == req->dst)) {
1257 if (!edesc->src_nents && iv_contig) {
1258 dst_dma = sg_dma_address(req->src);
1259 } else {
1260 dst_dma = edesc->sec4_sg_dma +
1261 sizeof(struct sec4_sg_entry);
1262 out_options = LDST_SGF;
1263 }
1264 } else {
1265 if (!edesc->dst_nents) {
1266 dst_dma = sg_dma_address(req->dst);
1267 } else {
1268 dst_dma = edesc->sec4_sg_dma +
1269 sec4_sg_index * sizeof(struct sec4_sg_entry);
1270 out_options = LDST_SGF;
1271 }
1272 }
1273 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1274 }
1275
1276 /*
1277 * allocate and map the aead extended descriptor
1278 */
1279 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1280 int desc_bytes, bool *all_contig_ptr,
1281 bool encrypt)
1282 {
1283 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1284 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1285 struct device *jrdev = ctx->jrdev;
1286 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1287 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1288 int assoc_nents, src_nents, dst_nents = 0;
1289 struct aead_edesc *edesc;
1290 dma_addr_t iv_dma = 0;
1291 int sgc;
1292 bool all_contig = true;
1293 bool assoc_chained = false, src_chained = false, dst_chained = false;
1294 int ivsize = crypto_aead_ivsize(aead);
1295 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1296 unsigned int authsize = ctx->authsize;
1297
1298 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1299
1300 if (unlikely(req->dst != req->src)) {
1301 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1302 dst_nents = sg_count(req->dst,
1303 req->cryptlen +
1304 (encrypt ? authsize : (-authsize)),
1305 &dst_chained);
1306 } else {
1307 src_nents = sg_count(req->src,
1308 req->cryptlen +
1309 (encrypt ? authsize : 0),
1310 &src_chained);
1311 }
1312
1313 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1314 DMA_TO_DEVICE, assoc_chained);
1315 if (likely(req->src == req->dst)) {
1316 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1317 DMA_BIDIRECTIONAL, src_chained);
1318 } else {
1319 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1320 DMA_TO_DEVICE, src_chained);
1321 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1322 DMA_FROM_DEVICE, dst_chained);
1323 }
1324
1325 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1326 if (dma_mapping_error(jrdev, iv_dma)) {
1327 dev_err(jrdev, "unable to map IV\n");
1328 return ERR_PTR(-ENOMEM);
1329 }
1330
1331 /* Check if data are contiguous */
1332 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1333 iv_dma || src_nents || iv_dma + ivsize !=
1334 sg_dma_address(req->src)) {
1335 all_contig = false;
1336 assoc_nents = assoc_nents ? : 1;
1337 src_nents = src_nents ? : 1;
1338 sec4_sg_len = assoc_nents + 1 + src_nents;
1339 }
1340 sec4_sg_len += dst_nents;
1341
1342 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1343
1344 /* allocate space for base edesc and hw desc commands, link tables */
1345 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1346 sec4_sg_bytes, GFP_DMA | flags);
1347 if (!edesc) {
1348 dev_err(jrdev, "could not allocate extended descriptor\n");
1349 return ERR_PTR(-ENOMEM);
1350 }
1351
1352 edesc->assoc_nents = assoc_nents;
1353 edesc->assoc_chained = assoc_chained;
1354 edesc->src_nents = src_nents;
1355 edesc->src_chained = src_chained;
1356 edesc->dst_nents = dst_nents;
1357 edesc->dst_chained = dst_chained;
1358 edesc->iv_dma = iv_dma;
1359 edesc->sec4_sg_bytes = sec4_sg_bytes;
1360 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1361 desc_bytes;
1362 *all_contig_ptr = all_contig;
1363
1364 sec4_sg_index = 0;
1365 if (!all_contig) {
1366 sg_to_sec4_sg(req->assoc,
1367 (assoc_nents ? : 1),
1368 edesc->sec4_sg +
1369 sec4_sg_index, 0);
1370 sec4_sg_index += assoc_nents ? : 1;
1371 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1372 iv_dma, ivsize, 0);
1373 sec4_sg_index += 1;
1374 sg_to_sec4_sg_last(req->src,
1375 (src_nents ? : 1),
1376 edesc->sec4_sg +
1377 sec4_sg_index, 0);
1378 sec4_sg_index += src_nents ? : 1;
1379 }
1380 if (dst_nents) {
1381 sg_to_sec4_sg_last(req->dst, dst_nents,
1382 edesc->sec4_sg + sec4_sg_index, 0);
1383 }
1384 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1385 sec4_sg_bytes, DMA_TO_DEVICE);
1386 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1387 dev_err(jrdev, "unable to map S/G table\n");
1388 return ERR_PTR(-ENOMEM);
1389 }
1390
1391 return edesc;
1392 }
1393
1394 static int aead_encrypt(struct aead_request *req)
1395 {
1396 struct aead_edesc *edesc;
1397 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1398 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1399 struct device *jrdev = ctx->jrdev;
1400 bool all_contig;
1401 u32 *desc;
1402 int ret = 0;
1403
1404 /* allocate extended descriptor */
1405 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1406 CAAM_CMD_SZ, &all_contig, true);
1407 if (IS_ERR(edesc))
1408 return PTR_ERR(edesc);
1409
1410 /* Create and submit job descriptor */
1411 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1412 all_contig, true);
1413 #ifdef DEBUG
1414 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1415 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1416 desc_bytes(edesc->hw_desc), 1);
1417 #endif
1418
1419 desc = edesc->hw_desc;
1420 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1421 if (!ret) {
1422 ret = -EINPROGRESS;
1423 } else {
1424 aead_unmap(jrdev, edesc, req);
1425 kfree(edesc);
1426 }
1427
1428 return ret;
1429 }
1430
1431 static int aead_decrypt(struct aead_request *req)
1432 {
1433 struct aead_edesc *edesc;
1434 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1435 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1436 struct device *jrdev = ctx->jrdev;
1437 bool all_contig;
1438 u32 *desc;
1439 int ret = 0;
1440
1441 /* allocate extended descriptor */
1442 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1443 CAAM_CMD_SZ, &all_contig, false);
1444 if (IS_ERR(edesc))
1445 return PTR_ERR(edesc);
1446
1447 #ifdef DEBUG
1448 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1449 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1450 req->cryptlen, 1);
1451 #endif
1452
1453 /* Create and submit job descriptor*/
1454 init_aead_job(ctx->sh_desc_dec,
1455 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1456 #ifdef DEBUG
1457 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1458 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1459 desc_bytes(edesc->hw_desc), 1);
1460 #endif
1461
1462 desc = edesc->hw_desc;
1463 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1464 if (!ret) {
1465 ret = -EINPROGRESS;
1466 } else {
1467 aead_unmap(jrdev, edesc, req);
1468 kfree(edesc);
1469 }
1470
1471 return ret;
1472 }
1473
1474 /*
1475 * allocate and map the aead extended descriptor for aead givencrypt
1476 */
1477 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1478 *greq, int desc_bytes,
1479 u32 *contig_ptr)
1480 {
1481 struct aead_request *req = &greq->areq;
1482 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1483 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1484 struct device *jrdev = ctx->jrdev;
1485 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1486 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1487 int assoc_nents, src_nents, dst_nents = 0;
1488 struct aead_edesc *edesc;
1489 dma_addr_t iv_dma = 0;
1490 int sgc;
1491 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1492 int ivsize = crypto_aead_ivsize(aead);
1493 bool assoc_chained = false, src_chained = false, dst_chained = false;
1494 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1495
1496 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1497 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1498
1499 if (unlikely(req->dst != req->src))
1500 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1501 &dst_chained);
1502
1503 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1504 DMA_TO_DEVICE, assoc_chained);
1505 if (likely(req->src == req->dst)) {
1506 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1507 DMA_BIDIRECTIONAL, src_chained);
1508 } else {
1509 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1510 DMA_TO_DEVICE, src_chained);
1511 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1512 DMA_FROM_DEVICE, dst_chained);
1513 }
1514
1515 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1516 if (dma_mapping_error(jrdev, iv_dma)) {
1517 dev_err(jrdev, "unable to map IV\n");
1518 return ERR_PTR(-ENOMEM);
1519 }
1520
1521 /* Check if data are contiguous */
1522 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1523 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1524 contig &= ~GIV_SRC_CONTIG;
1525 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1526 contig &= ~GIV_DST_CONTIG;
1527 if (unlikely(req->src != req->dst)) {
1528 dst_nents = dst_nents ? : 1;
1529 sec4_sg_len += 1;
1530 }
1531 if (!(contig & GIV_SRC_CONTIG)) {
1532 assoc_nents = assoc_nents ? : 1;
1533 src_nents = src_nents ? : 1;
1534 sec4_sg_len += assoc_nents + 1 + src_nents;
1535 if (likely(req->src == req->dst))
1536 contig &= ~GIV_DST_CONTIG;
1537 }
1538 sec4_sg_len += dst_nents;
1539
1540 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1541
1542 /* allocate space for base edesc and hw desc commands, link tables */
1543 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1544 sec4_sg_bytes, GFP_DMA | flags);
1545 if (!edesc) {
1546 dev_err(jrdev, "could not allocate extended descriptor\n");
1547 return ERR_PTR(-ENOMEM);
1548 }
1549
1550 edesc->assoc_nents = assoc_nents;
1551 edesc->assoc_chained = assoc_chained;
1552 edesc->src_nents = src_nents;
1553 edesc->src_chained = src_chained;
1554 edesc->dst_nents = dst_nents;
1555 edesc->dst_chained = dst_chained;
1556 edesc->iv_dma = iv_dma;
1557 edesc->sec4_sg_bytes = sec4_sg_bytes;
1558 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1559 desc_bytes;
1560 *contig_ptr = contig;
1561
1562 sec4_sg_index = 0;
1563 if (!(contig & GIV_SRC_CONTIG)) {
1564 sg_to_sec4_sg(req->assoc, assoc_nents,
1565 edesc->sec4_sg +
1566 sec4_sg_index, 0);
1567 sec4_sg_index += assoc_nents;
1568 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1569 iv_dma, ivsize, 0);
1570 sec4_sg_index += 1;
1571 sg_to_sec4_sg_last(req->src, src_nents,
1572 edesc->sec4_sg +
1573 sec4_sg_index, 0);
1574 sec4_sg_index += src_nents;
1575 }
1576 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1577 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1578 iv_dma, ivsize, 0);
1579 sec4_sg_index += 1;
1580 sg_to_sec4_sg_last(req->dst, dst_nents,
1581 edesc->sec4_sg + sec4_sg_index, 0);
1582 }
1583 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1584 sec4_sg_bytes, DMA_TO_DEVICE);
1585 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1586 dev_err(jrdev, "unable to map S/G table\n");
1587 return ERR_PTR(-ENOMEM);
1588 }
1589
1590 return edesc;
1591 }
1592
1593 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1594 {
1595 struct aead_request *req = &areq->areq;
1596 struct aead_edesc *edesc;
1597 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1598 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599 struct device *jrdev = ctx->jrdev;
1600 u32 contig;
1601 u32 *desc;
1602 int ret = 0;
1603
1604 /* allocate extended descriptor */
1605 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1606 CAAM_CMD_SZ, &contig);
1607
1608 if (IS_ERR(edesc))
1609 return PTR_ERR(edesc);
1610
1611 #ifdef DEBUG
1612 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1613 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1614 req->cryptlen, 1);
1615 #endif
1616
1617 /* Create and submit job descriptor*/
1618 init_aead_giv_job(ctx->sh_desc_givenc,
1619 ctx->sh_desc_givenc_dma, edesc, req, contig);
1620 #ifdef DEBUG
1621 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1622 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1623 desc_bytes(edesc->hw_desc), 1);
1624 #endif
1625
1626 desc = edesc->hw_desc;
1627 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1628 if (!ret) {
1629 ret = -EINPROGRESS;
1630 } else {
1631 aead_unmap(jrdev, edesc, req);
1632 kfree(edesc);
1633 }
1634
1635 return ret;
1636 }
1637
1638 static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1639 {
1640 return aead_encrypt(&areq->areq);
1641 }
1642
1643 /*
1644 * allocate and map the ablkcipher extended descriptor for ablkcipher
1645 */
1646 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1647 *req, int desc_bytes,
1648 bool *iv_contig_out)
1649 {
1650 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1651 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1652 struct device *jrdev = ctx->jrdev;
1653 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1654 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1655 GFP_KERNEL : GFP_ATOMIC;
1656 int src_nents, dst_nents = 0, sec4_sg_bytes;
1657 struct ablkcipher_edesc *edesc;
1658 dma_addr_t iv_dma = 0;
1659 bool iv_contig = false;
1660 int sgc;
1661 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1662 bool src_chained = false, dst_chained = false;
1663 int sec4_sg_index;
1664
1665 src_nents = sg_count(req->src, req->nbytes, &src_chained);
1666
1667 if (req->dst != req->src)
1668 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1669
1670 if (likely(req->src == req->dst)) {
1671 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1672 DMA_BIDIRECTIONAL, src_chained);
1673 } else {
1674 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1675 DMA_TO_DEVICE, src_chained);
1676 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1677 DMA_FROM_DEVICE, dst_chained);
1678 }
1679
1680 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1681 if (dma_mapping_error(jrdev, iv_dma)) {
1682 dev_err(jrdev, "unable to map IV\n");
1683 return ERR_PTR(-ENOMEM);
1684 }
1685
1686 /*
1687 * Check if the IV can be contiguous with the source. If so, use it
1688 * directly; if not, add a separate S/G table entry for it.
1689 */
1690 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1691 iv_contig = true;
1692 else
1693 src_nents = src_nents ? : 1;
1694 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1695 sizeof(struct sec4_sg_entry);
1696
1697 /* allocate space for base edesc and hw desc commands, link tables */
1698 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1699 sec4_sg_bytes, GFP_DMA | flags);
1700 if (!edesc) {
1701 dev_err(jrdev, "could not allocate extended descriptor\n");
1702 return ERR_PTR(-ENOMEM);
1703 }
1704
1705 edesc->src_nents = src_nents;
1706 edesc->src_chained = src_chained;
1707 edesc->dst_nents = dst_nents;
1708 edesc->dst_chained = dst_chained;
1709 edesc->sec4_sg_bytes = sec4_sg_bytes;
1710 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1711 desc_bytes;
1712
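/*
 * S/G table layout when the IV is not contiguous with the source: one entry
 * for the IV, then the source segments; destination segments (out-of-place
 * requests only) follow at the next free index.
 */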
1713 sec4_sg_index = 0;
1714 if (!iv_contig) {
1715 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1716 sg_to_sec4_sg_last(req->src, src_nents,
1717 edesc->sec4_sg + 1, 0);
1718 sec4_sg_index += 1 + src_nents;
1719 }
1720
1721 if (dst_nents) {
1722 sg_to_sec4_sg_last(req->dst, dst_nents,
1723 edesc->sec4_sg + sec4_sg_index, 0);
1724 }
1725
1726 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1727 sec4_sg_bytes, DMA_TO_DEVICE);
1728 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1729 dev_err(jrdev, "unable to map S/G table\n");
/* free the just-allocated extended descriptor instead of leaking it */
kfree(edesc);
1730 return ERR_PTR(-ENOMEM);
1731 }
1732
1733 edesc->iv_dma = iv_dma;
1734
1735 #ifdef DEBUG
1736 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1737 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1738 sec4_sg_bytes, 1);
1739 #endif
1740
1741 *iv_contig_out = iv_contig;
1742 return edesc;
1743 }
1744
1745 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1746 {
1747 struct ablkcipher_edesc *edesc;
1748 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1749 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1750 struct device *jrdev = ctx->jrdev;
1751 bool iv_contig;
1752 u32 *desc;
1753 int ret = 0;
1754
1755 /* allocate extended descriptor */
1756 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1757 CAAM_CMD_SZ, &iv_contig);
1758 if (IS_ERR(edesc))
1759 return PTR_ERR(edesc);
1760
1761 /* Create and submit job descriptor */
1762 init_ablkcipher_job(ctx->sh_desc_enc,
1763 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1764 #ifdef DEBUG
1765 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1766 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1767 desc_bytes(edesc->hw_desc), 1);
1768 #endif
1769 desc = edesc->hw_desc;
1770 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1771
1772 if (!ret) {
1773 ret = -EINPROGRESS;
1774 } else {
1775 ablkcipher_unmap(jrdev, edesc, req);
1776 kfree(edesc);
1777 }
1778
1779 return ret;
1780 }
1781
1782 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1783 {
1784 struct ablkcipher_edesc *edesc;
1785 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1786 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1787 struct device *jrdev = ctx->jrdev;
1788 bool iv_contig;
1789 u32 *desc;
1790 int ret = 0;
1791
1792 /* allocate extended descriptor */
1793 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1794 CAAM_CMD_SZ, &iv_contig);
1795 if (IS_ERR(edesc))
1796 return PTR_ERR(edesc);
1797
1798 /* Create and submit job descriptor */
1799 init_ablkcipher_job(ctx->sh_desc_dec,
1800 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1801 desc = edesc->hw_desc;
1802 #ifdef DEBUG
1803 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1804 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1805 desc_bytes(edesc->hw_desc), 1);
1806 #endif
1807
1808 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1809 if (!ret) {
1810 ret = -EINPROGRESS;
1811 } else {
1812 ablkcipher_unmap(jrdev, edesc, req);
1813 kfree(edesc);
1814 }
1815
1816 return ret;
1817 }
1818
1819 #define template_aead template_u.aead
1820 #define template_ablkcipher template_u.ablkcipher
1821 struct caam_alg_template {
1822 char name[CRYPTO_MAX_ALG_NAME];
1823 char driver_name[CRYPTO_MAX_ALG_NAME];
1824 unsigned int blocksize;
1825 u32 type;
1826 union {
1827 struct ablkcipher_alg ablkcipher;
1828 struct aead_alg aead;
1829 struct blkcipher_alg blkcipher;
1830 struct cipher_alg cipher;
1831 struct compress_alg compress;
1832 struct rng_alg rng;
1833 } template_u;
1834 u32 class1_alg_type;
1835 u32 class2_alg_type;
1836 u32 alg_op;
1837 };
1838
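/*
 * Template table for the algorithms exposed through the crypto API. Each
 * entry supplies the crypto_alg callbacks plus the CAAM class 1 (cipher)
 * and class 2 (authentication) OPERATION selectors used when the shared
 * descriptors are constructed.
 */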
1839 static struct caam_alg_template driver_algs[] = {
1840 /* single-pass ipsec_esp descriptor */
1841 {
1842 .name = "authenc(hmac(md5),ecb(cipher_null))",
1843 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
1844 .blocksize = NULL_BLOCK_SIZE,
1845 .type = CRYPTO_ALG_TYPE_AEAD,
1846 .template_aead = {
1847 .setkey = aead_setkey,
1848 .setauthsize = aead_setauthsize,
1849 .encrypt = aead_encrypt,
1850 .decrypt = aead_decrypt,
1851 .givencrypt = aead_null_givencrypt,
1852 .geniv = "<built-in>",
1853 .ivsize = NULL_IV_SIZE,
1854 .maxauthsize = MD5_DIGEST_SIZE,
1855 },
1856 .class1_alg_type = 0,
1857 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1858 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1859 },
1860 {
1861 .name = "authenc(hmac(sha1),ecb(cipher_null))",
1862 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
1863 .blocksize = NULL_BLOCK_SIZE,
1864 .type = CRYPTO_ALG_TYPE_AEAD,
1865 .template_aead = {
1866 .setkey = aead_setkey,
1867 .setauthsize = aead_setauthsize,
1868 .encrypt = aead_encrypt,
1869 .decrypt = aead_decrypt,
1870 .givencrypt = aead_null_givencrypt,
1871 .geniv = "<built-in>",
1872 .ivsize = NULL_IV_SIZE,
1873 .maxauthsize = SHA1_DIGEST_SIZE,
1874 },
1875 .class1_alg_type = 0,
1876 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1877 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1878 },
1879 {
1880 .name = "authenc(hmac(sha224),ecb(cipher_null))",
1881 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
1882 .blocksize = NULL_BLOCK_SIZE,
1883 .type = CRYPTO_ALG_TYPE_AEAD,
1884 .template_aead = {
1885 .setkey = aead_setkey,
1886 .setauthsize = aead_setauthsize,
1887 .encrypt = aead_encrypt,
1888 .decrypt = aead_decrypt,
1889 .givencrypt = aead_null_givencrypt,
1890 .geniv = "<built-in>",
1891 .ivsize = NULL_IV_SIZE,
1892 .maxauthsize = SHA224_DIGEST_SIZE,
1893 },
1894 .class1_alg_type = 0,
1895 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1896 OP_ALG_AAI_HMAC_PRECOMP,
1897 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1898 },
1899 {
1900 .name = "authenc(hmac(sha256),ecb(cipher_null))",
1901 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
1902 .blocksize = NULL_BLOCK_SIZE,
1903 .type = CRYPTO_ALG_TYPE_AEAD,
1904 .template_aead = {
1905 .setkey = aead_setkey,
1906 .setauthsize = aead_setauthsize,
1907 .encrypt = aead_encrypt,
1908 .decrypt = aead_decrypt,
1909 .givencrypt = aead_null_givencrypt,
1910 .geniv = "<built-in>",
1911 .ivsize = NULL_IV_SIZE,
1912 .maxauthsize = SHA256_DIGEST_SIZE,
1913 },
1914 .class1_alg_type = 0,
1915 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1916 OP_ALG_AAI_HMAC_PRECOMP,
1917 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1918 },
1919 {
1920 .name = "authenc(hmac(sha384),ecb(cipher_null))",
1921 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
1922 .blocksize = NULL_BLOCK_SIZE,
1923 .type = CRYPTO_ALG_TYPE_AEAD,
1924 .template_aead = {
1925 .setkey = aead_setkey,
1926 .setauthsize = aead_setauthsize,
1927 .encrypt = aead_encrypt,
1928 .decrypt = aead_decrypt,
1929 .givencrypt = aead_null_givencrypt,
1930 .geniv = "<built-in>",
1931 .ivsize = NULL_IV_SIZE,
1932 .maxauthsize = SHA384_DIGEST_SIZE,
1933 },
1934 .class1_alg_type = 0,
1935 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1936 OP_ALG_AAI_HMAC_PRECOMP,
1937 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1938 },
1939 {
1940 .name = "authenc(hmac(sha512),ecb(cipher_null))",
1941 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
1942 .blocksize = NULL_BLOCK_SIZE,
1943 .type = CRYPTO_ALG_TYPE_AEAD,
1944 .template_aead = {
1945 .setkey = aead_setkey,
1946 .setauthsize = aead_setauthsize,
1947 .encrypt = aead_encrypt,
1948 .decrypt = aead_decrypt,
1949 .givencrypt = aead_null_givencrypt,
1950 .geniv = "<built-in>",
1951 .ivsize = NULL_IV_SIZE,
1952 .maxauthsize = SHA512_DIGEST_SIZE,
1953 },
1954 .class1_alg_type = 0,
1955 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1956 OP_ALG_AAI_HMAC_PRECOMP,
1957 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1958 },
1959 {
1960 .name = "authenc(hmac(md5),cbc(aes))",
1961 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1962 .blocksize = AES_BLOCK_SIZE,
1963 .type = CRYPTO_ALG_TYPE_AEAD,
1964 .template_aead = {
1965 .setkey = aead_setkey,
1966 .setauthsize = aead_setauthsize,
1967 .encrypt = aead_encrypt,
1968 .decrypt = aead_decrypt,
1969 .givencrypt = aead_givencrypt,
1970 .geniv = "<built-in>",
1971 .ivsize = AES_BLOCK_SIZE,
1972 .maxauthsize = MD5_DIGEST_SIZE,
1973 },
1974 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1975 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1976 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1977 },
1978 {
1979 .name = "authenc(hmac(sha1),cbc(aes))",
1980 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1981 .blocksize = AES_BLOCK_SIZE,
1982 .type = CRYPTO_ALG_TYPE_AEAD,
1983 .template_aead = {
1984 .setkey = aead_setkey,
1985 .setauthsize = aead_setauthsize,
1986 .encrypt = aead_encrypt,
1987 .decrypt = aead_decrypt,
1988 .givencrypt = aead_givencrypt,
1989 .geniv = "<built-in>",
1990 .ivsize = AES_BLOCK_SIZE,
1991 .maxauthsize = SHA1_DIGEST_SIZE,
1992 },
1993 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1994 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1995 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1996 },
1997 {
1998 .name = "authenc(hmac(sha224),cbc(aes))",
1999 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
2000 .blocksize = AES_BLOCK_SIZE,
2001 .type = CRYPTO_ALG_TYPE_AEAD,
2002 .template_aead = {
2003 .setkey = aead_setkey,
2004 .setauthsize = aead_setauthsize,
2005 .encrypt = aead_encrypt,
2006 .decrypt = aead_decrypt,
2007 .givencrypt = aead_givencrypt,
2008 .geniv = "<built-in>",
2009 .ivsize = AES_BLOCK_SIZE,
2010 .maxauthsize = SHA224_DIGEST_SIZE,
2011 },
2012 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2013 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2014 OP_ALG_AAI_HMAC_PRECOMP,
2015 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2016 },
2017 {
2018 .name = "authenc(hmac(sha256),cbc(aes))",
2019 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
2020 .blocksize = AES_BLOCK_SIZE,
2021 .type = CRYPTO_ALG_TYPE_AEAD,
2022 .template_aead = {
2023 .setkey = aead_setkey,
2024 .setauthsize = aead_setauthsize,
2025 .encrypt = aead_encrypt,
2026 .decrypt = aead_decrypt,
2027 .givencrypt = aead_givencrypt,
2028 .geniv = "<built-in>",
2029 .ivsize = AES_BLOCK_SIZE,
2030 .maxauthsize = SHA256_DIGEST_SIZE,
2031 },
2032 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2033 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2034 OP_ALG_AAI_HMAC_PRECOMP,
2035 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2036 },
2037 {
2038 .name = "authenc(hmac(sha384),cbc(aes))",
2039 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2040 .blocksize = AES_BLOCK_SIZE,
2041 .type = CRYPTO_ALG_TYPE_AEAD,
2042 .template_aead = {
2043 .setkey = aead_setkey,
2044 .setauthsize = aead_setauthsize,
2045 .encrypt = aead_encrypt,
2046 .decrypt = aead_decrypt,
2047 .givencrypt = aead_givencrypt,
2048 .geniv = "<built-in>",
2049 .ivsize = AES_BLOCK_SIZE,
2050 .maxauthsize = SHA384_DIGEST_SIZE,
2051 },
2052 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2053 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2054 OP_ALG_AAI_HMAC_PRECOMP,
2055 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2056 },
2058 {
2059 .name = "authenc(hmac(sha512),cbc(aes))",
2060 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2061 .blocksize = AES_BLOCK_SIZE,
2062 .type = CRYPTO_ALG_TYPE_AEAD,
2063 .template_aead = {
2064 .setkey = aead_setkey,
2065 .setauthsize = aead_setauthsize,
2066 .encrypt = aead_encrypt,
2067 .decrypt = aead_decrypt,
2068 .givencrypt = aead_givencrypt,
2069 .geniv = "<built-in>",
2070 .ivsize = AES_BLOCK_SIZE,
2071 .maxauthsize = SHA512_DIGEST_SIZE,
2072 },
2073 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2074 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2075 OP_ALG_AAI_HMAC_PRECOMP,
2076 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2077 },
2078 {
2079 .name = "authenc(hmac(md5),cbc(des3_ede))",
2080 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2081 .blocksize = DES3_EDE_BLOCK_SIZE,
2082 .type = CRYPTO_ALG_TYPE_AEAD,
2083 .template_aead = {
2084 .setkey = aead_setkey,
2085 .setauthsize = aead_setauthsize,
2086 .encrypt = aead_encrypt,
2087 .decrypt = aead_decrypt,
2088 .givencrypt = aead_givencrypt,
2089 .geniv = "<built-in>",
2090 .ivsize = DES3_EDE_BLOCK_SIZE,
2091 .maxauthsize = MD5_DIGEST_SIZE,
2092 },
2093 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2094 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2095 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2096 },
2097 {
2098 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2099 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2100 .blocksize = DES3_EDE_BLOCK_SIZE,
2101 .type = CRYPTO_ALG_TYPE_AEAD,
2102 .template_aead = {
2103 .setkey = aead_setkey,
2104 .setauthsize = aead_setauthsize,
2105 .encrypt = aead_encrypt,
2106 .decrypt = aead_decrypt,
2107 .givencrypt = aead_givencrypt,
2108 .geniv = "<built-in>",
2109 .ivsize = DES3_EDE_BLOCK_SIZE,
2110 .maxauthsize = SHA1_DIGEST_SIZE,
2111 },
2112 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2113 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2114 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2115 },
2116 {
2117 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2118 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2119 .blocksize = DES3_EDE_BLOCK_SIZE,
2120 .type = CRYPTO_ALG_TYPE_AEAD,
2121 .template_aead = {
2122 .setkey = aead_setkey,
2123 .setauthsize = aead_setauthsize,
2124 .encrypt = aead_encrypt,
2125 .decrypt = aead_decrypt,
2126 .givencrypt = aead_givencrypt,
2127 .geniv = "<built-in>",
2128 .ivsize = DES3_EDE_BLOCK_SIZE,
2129 .maxauthsize = SHA224_DIGEST_SIZE,
2130 },
2131 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2132 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2133 OP_ALG_AAI_HMAC_PRECOMP,
2134 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2135 },
2136 {
2137 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2138 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2139 .blocksize = DES3_EDE_BLOCK_SIZE,
2140 .type = CRYPTO_ALG_TYPE_AEAD,
2141 .template_aead = {
2142 .setkey = aead_setkey,
2143 .setauthsize = aead_setauthsize,
2144 .encrypt = aead_encrypt,
2145 .decrypt = aead_decrypt,
2146 .givencrypt = aead_givencrypt,
2147 .geniv = "<built-in>",
2148 .ivsize = DES3_EDE_BLOCK_SIZE,
2149 .maxauthsize = SHA256_DIGEST_SIZE,
2150 },
2151 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2152 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2153 OP_ALG_AAI_HMAC_PRECOMP,
2154 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2155 },
2156 {
2157 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2158 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2159 .blocksize = DES3_EDE_BLOCK_SIZE,
2160 .type = CRYPTO_ALG_TYPE_AEAD,
2161 .template_aead = {
2162 .setkey = aead_setkey,
2163 .setauthsize = aead_setauthsize,
2164 .encrypt = aead_encrypt,
2165 .decrypt = aead_decrypt,
2166 .givencrypt = aead_givencrypt,
2167 .geniv = "<built-in>",
2168 .ivsize = DES3_EDE_BLOCK_SIZE,
2169 .maxauthsize = SHA384_DIGEST_SIZE,
2170 },
2171 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2172 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2173 OP_ALG_AAI_HMAC_PRECOMP,
2174 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2175 },
2176 {
2177 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2178 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2179 .blocksize = DES3_EDE_BLOCK_SIZE,
2180 .type = CRYPTO_ALG_TYPE_AEAD,
2181 .template_aead = {
2182 .setkey = aead_setkey,
2183 .setauthsize = aead_setauthsize,
2184 .encrypt = aead_encrypt,
2185 .decrypt = aead_decrypt,
2186 .givencrypt = aead_givencrypt,
2187 .geniv = "<built-in>",
2188 .ivsize = DES3_EDE_BLOCK_SIZE,
2189 .maxauthsize = SHA512_DIGEST_SIZE,
2190 },
2191 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2192 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2193 OP_ALG_AAI_HMAC_PRECOMP,
2194 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2195 },
2196 {
2197 .name = "authenc(hmac(md5),cbc(des))",
2198 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2199 .blocksize = DES_BLOCK_SIZE,
2200 .type = CRYPTO_ALG_TYPE_AEAD,
2201 .template_aead = {
2202 .setkey = aead_setkey,
2203 .setauthsize = aead_setauthsize,
2204 .encrypt = aead_encrypt,
2205 .decrypt = aead_decrypt,
2206 .givencrypt = aead_givencrypt,
2207 .geniv = "<built-in>",
2208 .ivsize = DES_BLOCK_SIZE,
2209 .maxauthsize = MD5_DIGEST_SIZE,
2210 },
2211 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2212 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2213 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2214 },
2215 {
2216 .name = "authenc(hmac(sha1),cbc(des))",
2217 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2218 .blocksize = DES_BLOCK_SIZE,
2219 .type = CRYPTO_ALG_TYPE_AEAD,
2220 .template_aead = {
2221 .setkey = aead_setkey,
2222 .setauthsize = aead_setauthsize,
2223 .encrypt = aead_encrypt,
2224 .decrypt = aead_decrypt,
2225 .givencrypt = aead_givencrypt,
2226 .geniv = "<built-in>",
2227 .ivsize = DES_BLOCK_SIZE,
2228 .maxauthsize = SHA1_DIGEST_SIZE,
2229 },
2230 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2231 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2232 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2233 },
2234 {
2235 .name = "authenc(hmac(sha224),cbc(des))",
2236 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2237 .blocksize = DES_BLOCK_SIZE,
2238 .type = CRYPTO_ALG_TYPE_AEAD,
2239 .template_aead = {
2240 .setkey = aead_setkey,
2241 .setauthsize = aead_setauthsize,
2242 .encrypt = aead_encrypt,
2243 .decrypt = aead_decrypt,
2244 .givencrypt = aead_givencrypt,
2245 .geniv = "<built-in>",
2246 .ivsize = DES_BLOCK_SIZE,
2247 .maxauthsize = SHA224_DIGEST_SIZE,
2248 },
2249 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2250 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2251 OP_ALG_AAI_HMAC_PRECOMP,
2252 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2253 },
2254 {
2255 .name = "authenc(hmac(sha256),cbc(des))",
2256 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2257 .blocksize = DES_BLOCK_SIZE,
2258 .type = CRYPTO_ALG_TYPE_AEAD,
2259 .template_aead = {
2260 .setkey = aead_setkey,
2261 .setauthsize = aead_setauthsize,
2262 .encrypt = aead_encrypt,
2263 .decrypt = aead_decrypt,
2264 .givencrypt = aead_givencrypt,
2265 .geniv = "<built-in>",
2266 .ivsize = DES_BLOCK_SIZE,
2267 .maxauthsize = SHA256_DIGEST_SIZE,
2268 },
2269 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2270 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2271 OP_ALG_AAI_HMAC_PRECOMP,
2272 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2273 },
2274 {
2275 .name = "authenc(hmac(sha384),cbc(des))",
2276 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2277 .blocksize = DES_BLOCK_SIZE,
2278 .type = CRYPTO_ALG_TYPE_AEAD,
2279 .template_aead = {
2280 .setkey = aead_setkey,
2281 .setauthsize = aead_setauthsize,
2282 .encrypt = aead_encrypt,
2283 .decrypt = aead_decrypt,
2284 .givencrypt = aead_givencrypt,
2285 .geniv = "<built-in>",
2286 .ivsize = DES_BLOCK_SIZE,
2287 .maxauthsize = SHA384_DIGEST_SIZE,
2288 },
2289 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2290 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2291 OP_ALG_AAI_HMAC_PRECOMP,
2292 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2293 },
2294 {
2295 .name = "authenc(hmac(sha512),cbc(des))",
2296 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2297 .blocksize = DES_BLOCK_SIZE,
2298 .type = CRYPTO_ALG_TYPE_AEAD,
2299 .template_aead = {
2300 .setkey = aead_setkey,
2301 .setauthsize = aead_setauthsize,
2302 .encrypt = aead_encrypt,
2303 .decrypt = aead_decrypt,
2304 .givencrypt = aead_givencrypt,
2305 .geniv = "<built-in>",
2306 .ivsize = DES_BLOCK_SIZE,
2307 .maxauthsize = SHA512_DIGEST_SIZE,
2308 },
2309 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2310 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2311 OP_ALG_AAI_HMAC_PRECOMP,
2312 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2313 },
2314 /* ablkcipher descriptor */
2315 {
2316 .name = "cbc(aes)",
2317 .driver_name = "cbc-aes-caam",
2318 .blocksize = AES_BLOCK_SIZE,
2319 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2320 .template_ablkcipher = {
2321 .setkey = ablkcipher_setkey,
2322 .encrypt = ablkcipher_encrypt,
2323 .decrypt = ablkcipher_decrypt,
2324 .geniv = "eseqiv",
2325 .min_keysize = AES_MIN_KEY_SIZE,
2326 .max_keysize = AES_MAX_KEY_SIZE,
2327 .ivsize = AES_BLOCK_SIZE,
2328 },
2329 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2330 },
2331 {
2332 .name = "cbc(des3_ede)",
2333 .driver_name = "cbc-3des-caam",
2334 .blocksize = DES3_EDE_BLOCK_SIZE,
2335 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2336 .template_ablkcipher = {
2337 .setkey = ablkcipher_setkey,
2338 .encrypt = ablkcipher_encrypt,
2339 .decrypt = ablkcipher_decrypt,
2340 .geniv = "eseqiv",
2341 .min_keysize = DES3_EDE_KEY_SIZE,
2342 .max_keysize = DES3_EDE_KEY_SIZE,
2343 .ivsize = DES3_EDE_BLOCK_SIZE,
2344 },
2345 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2346 },
2347 {
2348 .name = "cbc(des)",
2349 .driver_name = "cbc-des-caam",
2350 .blocksize = DES_BLOCK_SIZE,
2351 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2352 .template_ablkcipher = {
2353 .setkey = ablkcipher_setkey,
2354 .encrypt = ablkcipher_encrypt,
2355 .decrypt = ablkcipher_decrypt,
2356 .geniv = "eseqiv",
2357 .min_keysize = DES_KEY_SIZE,
2358 .max_keysize = DES_KEY_SIZE,
2359 .ivsize = DES_BLOCK_SIZE,
2360 },
2361 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2362 }
2363 };
2364
2365 struct caam_crypto_alg {
2366 struct list_head entry;
2367 int class1_alg_type;
2368 int class2_alg_type;
2369 int alg_op;
2370 struct crypto_alg crypto_alg;
2371 };
2372
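/*
 * Per-tfm init: reserve a job ring for this transform and seed the context
 * with the OPERATION header templates from the matching template entry.
 */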
2373 static int caam_cra_init(struct crypto_tfm *tfm)
2374 {
2375 struct crypto_alg *alg = tfm->__crt_alg;
2376 struct caam_crypto_alg *caam_alg =
2377 container_of(alg, struct caam_crypto_alg, crypto_alg);
2378 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2379
2380 ctx->jrdev = caam_jr_alloc();
2381 if (IS_ERR(ctx->jrdev)) {
2382 pr_err("Job Ring Device allocation for transform failed\n");
2383 return PTR_ERR(ctx->jrdev);
2384 }
2385
2386 /* copy descriptor header template value */
2387 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2388 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2389 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2390
2391 return 0;
2392 }
2393
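/*
 * Per-tfm teardown: unmap any shared descriptors and key material still
 * DMA-mapped for this context, then release the job ring taken at init.
 */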
2394 static void caam_cra_exit(struct crypto_tfm *tfm)
2395 {
2396 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2397
2398 if (ctx->sh_desc_enc_dma &&
2399 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2400 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2401 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2402 if (ctx->sh_desc_dec_dma &&
2403 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2404 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2405 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2406 if (ctx->sh_desc_givenc_dma &&
2407 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2408 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2409 desc_bytes(ctx->sh_desc_givenc),
2410 DMA_TO_DEVICE);
2411 if (ctx->key_dma &&
2412 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
2413 dma_unmap_single(ctx->jrdev, ctx->key_dma,
2414 ctx->enckeylen + ctx->split_key_pad_len,
2415 DMA_TO_DEVICE);
2416
2417 caam_jr_free(ctx->jrdev);
2418 }
2419
2420 static void __exit caam_algapi_exit(void)
2421 {
2423 struct caam_crypto_alg *t_alg, *n;
2424
2425 if (!alg_list.next)
2426 return;
2427
2428 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2429 crypto_unregister_alg(&t_alg->crypto_alg);
2430 list_del(&t_alg->entry);
2431 kfree(t_alg);
2432 }
2433 }
2434
2435 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2436 *template)
2437 {
2438 struct caam_crypto_alg *t_alg;
2439 struct crypto_alg *alg;
2440
2441 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2442 if (!t_alg) {
2443 pr_err("failed to allocate t_alg\n");
2444 return ERR_PTR(-ENOMEM);
2445 }
2446
2447 alg = &t_alg->crypto_alg;
2448
2449 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2450 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2451 template->driver_name);
2452 alg->cra_module = THIS_MODULE;
2453 alg->cra_init = caam_cra_init;
2454 alg->cra_exit = caam_cra_exit;
2455 alg->cra_priority = CAAM_CRA_PRIORITY;
2456 alg->cra_blocksize = template->blocksize;
2457 alg->cra_alignmask = 0;
2458 alg->cra_ctxsize = sizeof(struct caam_ctx);
2459 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2460 template->type;
2461 switch (template->type) {
2462 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2463 alg->cra_type = &crypto_ablkcipher_type;
2464 alg->cra_ablkcipher = template->template_ablkcipher;
2465 break;
2466 case CRYPTO_ALG_TYPE_AEAD:
2467 alg->cra_type = &crypto_aead_type;
2468 alg->cra_aead = template->template_aead;
2469 break;
2470 }
2471
2472 t_alg->class1_alg_type = template->class1_alg_type;
2473 t_alg->class2_alg_type = template->class2_alg_type;
2474 t_alg->alg_op = template->alg_op;
2475
2476 return t_alg;
2477 }
2478
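/*
 * Module init: locate the CAAM controller node and, provided the controller
 * driver initialized successfully, register one crypto_alg per driver_algs[]
 * entry, skipping any that fail to allocate or register.
 */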
2479 static int __init caam_algapi_init(void)
2480 {
2481 struct device_node *dev_node;
2482 struct platform_device *pdev;
2483 struct device *ctrldev;
2484 void *priv;
2485 int i = 0, err = 0;
2486
2487 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2488 if (!dev_node) {
2489 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2490 if (!dev_node)
2491 return -ENODEV;
2492 }
2493
2494 pdev = of_find_device_by_node(dev_node);
2495 if (!pdev) {
2496 of_node_put(dev_node);
2497 return -ENODEV;
2498 }
2499
2500 ctrldev = &pdev->dev;
2501 priv = dev_get_drvdata(ctrldev);
2502 of_node_put(dev_node);
2503
2504 /*
2505 * If priv is NULL, it's probably because the caam driver wasn't
2506 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2507 */
2508 if (!priv)
2509 return -ENODEV;
2510
2512 INIT_LIST_HEAD(&alg_list);
2513
2514 /* register crypto algorithms the device supports */
2515 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2516 /* TODO: check if h/w supports alg */
2517 struct caam_crypto_alg *t_alg;
2518
2519 t_alg = caam_alg_alloc(&driver_algs[i]);
2520 if (IS_ERR(t_alg)) {
2521 err = PTR_ERR(t_alg);
2522 pr_warn("%s alg allocation failed\n",
2523 driver_algs[i].driver_name);
2524 continue;
2525 }
2526
2527 err = crypto_register_alg(&t_alg->crypto_alg);
2528 if (err) {
2529 pr_warn("%s alg registration failed\n",
2530 t_alg->crypto_alg.cra_driver_name);
2531 kfree(t_alg);
2532 } else
2533 list_add_tail(&t_alg->entry, &alg_list);
2534 }
2535 if (!list_empty(&alg_list))
2536 pr_info("caam algorithms registered in /proc/crypto\n");
2537
2538 return err;
2539 }
2540
2541 module_init(caam_algapi_init);
2542 module_exit(caam_algapi_exit);
2543
2544 MODULE_LICENSE("GPL");
2545 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2546 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2547