/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
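/*
 * For illustration only -- not code used by this driver: with the
 * desc_constr.h helpers, a job descriptor of the shape above could be
 * built roughly like this (the DMA addresses and lengths are
 * placeholders):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */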

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
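/*
 * Worked example for CAAM_MAX_KEY_SIZE, using the sizes behind the
 * macros above: AES-256 key (32) + RFC3686 nonce (4) + SHA-512 split
 * key (2 * 64 = 128) = 164 bytes.
 */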

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
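/*
 * DESC_MAX_USED_LEN is the shared-descriptor budget in command words:
 * CAAM_DESC_BYTES_MAX (the 64-word descriptor buffer) minus
 * DESC_JOB_IO_LEN, the room the job descriptor's header,
 * shared-descriptor pointer and SEQ IN/OUT PTR commands are assumed
 * to need (see desc_constr.h).
 */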

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii,
			bool may_sleep)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	/* advance via sg_next(it), not sg_next(sg), to walk the whole list */
	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
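/*
 * Example use of the helper above (debug builds only); "req" is a
 * hypothetical aead_request:
 *
 *	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
 *		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 *		    req->assoclen + req->cryptlen, true, false);
 */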
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
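/*
 * The fragment emitted above is effectively an if/else on the shared
 * (SHRD) condition:
 *
 *	    JUMP if SHRD -------------.
 *	    OPERATION (decrypt)       |
 *	    JUMP always ----------.   |
 *	dk: OPERATION (decrypt|DK) <--'
 *	    <next command> <------'
 */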

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
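/*
 * Both variable sequence lengths above are set from SEQINLEN (REG0 is
 * assumed to hold zero at this point), so the VLF fifo load/store move
 * the whole remaining input: for a block cipher, ciphertext out is
 * exactly as long as plaintext in.
 */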

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
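/*
 * Layout of ctx->key for the authenc algorithms (see append_key_aead()
 * and aead_setkey() below):
 *
 *	+--------------------+---------+------------------+
 *	| split key (padded) | enc key | nonce (rfc3686)  |
 *	+--------------------+---------+------------------+
 *	0      split_key_pad_len   (+ enckeylen)
 */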

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}
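/*
 * Note on the rfc3686 branch above: the nonce is staged as an
 * immediate into the output data fifo and then MOVEd to CONTEXT1 at
 * byte offset 16; together with the per-request IV and the counter
 * loaded later, this yields CONTEXT1[255:128] = {NONCE, IV, COUNTER}.
 */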

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
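/*
 * Worked example of the inline-key checks above, assuming 32-bit
 * descriptor pointers (DESC_JOB_IO_LEN = 5 command + 3 pointer words
 * = 32 bytes, hence AEAD_DESC_JOB_IO_LEN = 40): DESC_AEAD_NULL_ENC_LEN
 * is 56 bytes, so even the largest split key pad (128 bytes, SHA-512)
 * gives 56 + 40 + 128 = 224 <= 256 and the key is always inlined here.
 */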

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy IV from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload the IV */
	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}
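/*
 * Worked example of the inline-key checks in aead_set_sh_desc(), under
 * the same 32-bit pointer assumption (AUTHENC_DESC_JOB_IO_LEN = 60
 * bytes): for authenc(hmac(sha1),cbc(aes)) with a 256-bit AES key,
 * split_key_pad_len = ALIGN(2 * 20, 16) = 48 and DESC_AEAD_ENC_LEN =
 * 60 bytes, so 60 + 60 + 48 + 32 = 200 <= 256 and the keys fit inline.
 */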

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if keys are already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump over the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if keys are already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
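/*
 * The GCM encrypt descriptor above covers three degenerate cases with
 * conditional jumps: assoclen + cryptlen == 0 jumps straight to the
 * ICV write, assoclen == 0 skips the assoc-data read, and cryptlen ==
 * 0 branches to the zero-payload commands that read the assoc data as
 * LAST1 instead of the payload.
 */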

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
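/*
 * In the rfc4106 descriptors above, the assoclen seen by the hardware
 * is assumed to include the 8-byte explicit IV carried in the input
 * sequence, hence the "- 8" math and the 8-byte SEQ FIFO skips; the
 * 4-byte salt half of the nonce comes from the key instead (see
 * rfc4106_setkey() below).
 */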

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
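/*
 * The MDHA split key generated above is, in effect, the precomputed
 * pair of inner/outer HMAC digest states derived from the raw auth
 * key, which is why split_key_len is twice the digest size (see
 * aead_setkey() below); the hardware consumes these states rather
 * than the raw key.
 */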

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
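/*
 * Example of the mdpadlen[] lookup above: for hmac(sha256), the
 * algorithm submask selects the SHA-256 entry (32), giving
 * split_key_len = 2 * 32 = 64 and split_key_pad_len = ALIGN(64, 16)
 * = 64 bytes.
 */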

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
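/*
 * For both rfc4106 and rfc4543 above, only the AES key portion
 * (keylen - 4) is DMA-mapped; the 4-byte salt stays at the tail of
 * ctx->key. Per the RFCs, that salt concatenated with the 8-byte
 * per-request IV forms the 12-byte GCM nonce.
 */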
1505
ablkcipher_setkey(struct crypto_ablkcipher * ablkcipher,const u8 * key,unsigned int keylen)1506 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1507 const u8 *key, unsigned int keylen)
1508 {
1509 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1510 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1511 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1512 const char *alg_name = crypto_tfm_alg_name(tfm);
1513 struct device *jrdev = ctx->jrdev;
1514 int ret = 0;
1515 u32 *key_jump_cmd;
1516 u32 *desc;
1517 u8 *nonce;
1518 u32 geniv;
1519 u32 ctx1_iv_off = 0;
1520 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1521 OP_ALG_AAI_CTR_MOD128);
1522 const bool is_rfc3686 = (ctr_mode &&
1523 (strstr(alg_name, "rfc3686") != NULL));
1524
1525 #ifdef DEBUG
1526 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1527 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1528 #endif
1529 /*
1530 * AES-CTR needs to load IV in CONTEXT1 reg
1531 * at an offset of 128bits (16bytes)
1532 * CONTEXT1[255:128] = IV
1533 */
1534 if (ctr_mode)
1535 ctx1_iv_off = 16;
1536
1537 /*
1538 * RFC3686 specific:
1539 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1540 * | *key = {KEY, NONCE}
1541 */
1542 if (is_rfc3686) {
1543 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1544 keylen -= CTR_RFC3686_NONCE_SIZE;
1545 }
1546
1547 memcpy(ctx->key, key, keylen);
1548 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1549 DMA_TO_DEVICE);
1550 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1551 dev_err(jrdev, "unable to map key i/o memory\n");
1552 return -ENOMEM;
1553 }
1554 ctx->enckeylen = keylen;
1555
1556 /* ablkcipher_encrypt shared descriptor */
1557 desc = ctx->sh_desc_enc;
1558 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1559 /* Skip if already shared */
1560 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1561 JUMP_COND_SHRD);
1562
1563 /* Load class1 key only */
1564 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1565 ctx->enckeylen, CLASS_1 |
1566 KEY_DEST_CLASS_REG);
1567
1568 /* Load nonce into CONTEXT1 reg */
1569 if (is_rfc3686) {
1570 nonce = (u8 *)key + keylen;
1571 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1572 LDST_CLASS_IND_CCB |
1573 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1574 append_move(desc, MOVE_WAITCOMP |
1575 MOVE_SRC_OUTFIFO |
1576 MOVE_DEST_CLASS1CTX |
1577 (16 << MOVE_OFFSET_SHIFT) |
1578 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1579 }
1580
1581 set_jump_tgt_here(desc, key_jump_cmd);
1582
1583 /* Load iv */
1584 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1585 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1586
1587 /* Load counter into CONTEXT1 reg */
1588 if (is_rfc3686)
1589 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1590 LDST_SRCDST_BYTE_CONTEXT |
1591 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1592 LDST_OFFSET_SHIFT));
1593
1594 /* Load operation */
1595 append_operation(desc, ctx->class1_alg_type |
1596 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1597
1598 /* Perform operation */
1599 ablkcipher_append_src_dst(desc);
1600
1601 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1602 desc_bytes(desc),
1603 DMA_TO_DEVICE);
1604 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1605 dev_err(jrdev, "unable to map shared descriptor\n");
1606 return -ENOMEM;
1607 }
1608 #ifdef DEBUG
1609 print_hex_dump(KERN_ERR,
1610 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1611 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1612 desc_bytes(desc), 1);
1613 #endif
1614 /* ablkcipher_decrypt shared descriptor */
1615 desc = ctx->sh_desc_dec;
1616
1617 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1618 /* Skip if already shared */
1619 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1620 JUMP_COND_SHRD);
1621
1622 /* Load class1 key only */
1623 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1624 ctx->enckeylen, CLASS_1 |
1625 KEY_DEST_CLASS_REG);
1626
1627 /* Load nonce into CONTEXT1 reg */
1628 if (is_rfc3686) {
1629 nonce = (u8 *)key + keylen;
1630 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1631 LDST_CLASS_IND_CCB |
1632 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1633 append_move(desc, MOVE_WAITCOMP |
1634 MOVE_SRC_OUTFIFO |
1635 MOVE_DEST_CLASS1CTX |
1636 (16 << MOVE_OFFSET_SHIFT) |
1637 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1638 }
1639
1640 set_jump_tgt_here(desc, key_jump_cmd);
1641
1642 /* load IV */
1643 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1644 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1645
1646 /* Load counter into CONTEXT1 reg */
1647 if (is_rfc3686)
1648 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1649 LDST_SRCDST_BYTE_CONTEXT |
1650 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1651 LDST_OFFSET_SHIFT));
1652
1653 /* Choose operation */
1654 if (ctr_mode)
1655 append_operation(desc, ctx->class1_alg_type |
1656 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1657 else
1658 append_dec_op1(desc, ctx->class1_alg_type);
1659
1660 /* Perform operation */
1661 ablkcipher_append_src_dst(desc);
1662
1663 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1664 desc_bytes(desc),
1665 DMA_TO_DEVICE);
1666 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1667 dev_err(jrdev, "unable to map shared descriptor\n");
1668 return -ENOMEM;
1669 }
1670
1671 #ifdef DEBUG
1672 print_hex_dump(KERN_ERR,
1673 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1674 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1675 desc_bytes(desc), 1);
1676 #endif
1677 /* ablkcipher_givencrypt shared descriptor */
1678 desc = ctx->sh_desc_givenc;
1679
1680 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1681 /* Skip if already shared */
1682 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1683 JUMP_COND_SHRD);
1684
1685 /* Load class1 key only */
1686 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1687 ctx->enckeylen, CLASS_1 |
1688 KEY_DEST_CLASS_REG);
1689
1690 /* Load Nonce into CONTEXT1 reg */
1691 if (is_rfc3686) {
1692 nonce = (u8 *)key + keylen;
1693 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1694 LDST_CLASS_IND_CCB |
1695 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1696 append_move(desc, MOVE_WAITCOMP |
1697 MOVE_SRC_OUTFIFO |
1698 MOVE_DEST_CLASS1CTX |
1699 (16 << MOVE_OFFSET_SHIFT) |
1700 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1701 }
1702 set_jump_tgt_here(desc, key_jump_cmd);
1703
1704 /* Generate IV */
1705 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1706 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1707 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1708 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1709 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1710 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1711 append_move(desc, MOVE_WAITCOMP |
1712 MOVE_SRC_INFIFO |
1713 MOVE_DEST_CLASS1CTX |
1714 (crt->ivsize << MOVE_LEN_SHIFT) |
1715 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1716 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1717
1718 /* Copy generated IV to memory */
1719 append_seq_store(desc, crt->ivsize,
1720 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1721 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1722
1723 /* Load Counter into CONTEXT1 reg */
1724 if (is_rfc3686)
1725 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1726 LDST_SRCDST_BYTE_CONTEXT |
1727 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1728 LDST_OFFSET_SHIFT));
1729
1730 if (ctx1_iv_off)
1731 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1732 (1 << JUMP_OFFSET_SHIFT));
1733
1734 /* Load operation */
1735 append_operation(desc, ctx->class1_alg_type |
1736 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1737
1738 /* Perform operation */
1739 ablkcipher_append_src_dst(desc);
1740
1741 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1742 desc_bytes(desc),
1743 DMA_TO_DEVICE);
1744 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1745 dev_err(jrdev, "unable to map shared descriptor\n");
1746 return -ENOMEM;
1747 }
1748 #ifdef DEBUG
1749 print_hex_dump(KERN_ERR,
1750 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1751 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1752 desc_bytes(desc), 1);
1753 #endif
1754
1755 return ret;
1756 }
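/*
 * Not part of the driver - a minimal reading aid for the rfc3686 loads
 * above. The CONTEXT1 byte offsets below are derived from the
 * append_move()/append_load calls in this function, not from a hardware
 * manual.
 */
#if 0
	u32 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;	/* IV starts at 20 */

	/* CONTEXT1[16..19] = nonce       (MOVE to offset 16)          */
	/* CONTEXT1[20..27] = IV          (SEQ LOAD at ctx1_iv_off)    */
	/* CONTEXT1[28..31] = 0x00000001  (BE32 counter at offset 28)  */
#endif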
1757
1758 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1759 const u8 *key, unsigned int keylen)
1760 {
1761 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1762 struct device *jrdev = ctx->jrdev;
1763 u32 *key_jump_cmd, *desc;
1764 __be64 sector_size = cpu_to_be64(512);
1765
1766 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1767 crypto_ablkcipher_set_flags(ablkcipher,
1768 CRYPTO_TFM_RES_BAD_KEY_LEN);
1769 dev_err(jrdev, "key size mismatch\n");
1770 return -EINVAL;
1771 }
1772
1773 memcpy(ctx->key, key, keylen);
1774 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1775 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1776 dev_err(jrdev, "unable to map key i/o memory\n");
1777 return -ENOMEM;
1778 }
1779 ctx->enckeylen = keylen;
1780
1781 /* xts_ablkcipher_encrypt shared descriptor */
1782 desc = ctx->sh_desc_enc;
1783 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1784 /* Skip if already shared */
1785 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1786 JUMP_COND_SHRD);
1787
1788 /* Load class1 keys only */
1789 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1790 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1791
1792 /* Load sector size with index 40 bytes (0x28) */
1793 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1794 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1795 append_data(desc, (void *)&sector_size, 8);
1796
1797 set_jump_tgt_here(desc, key_jump_cmd);
1798
1799 /*
1800 * create sequence for loading the sector index
1801 * Upper 8B of IV - will be used as sector index
1802 * Lower 8B of IV - will be discarded
1803 */
1804 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1805 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1806 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1807
1808 /* Load operation */
1809 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1810 OP_ALG_ENCRYPT);
1811
1812 /* Perform operation */
1813 ablkcipher_append_src_dst(desc);
1814
1815 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1816 DMA_TO_DEVICE);
1817 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1818 dev_err(jrdev, "unable to map shared descriptor\n");
1819 return -ENOMEM;
1820 }
1821 #ifdef DEBUG
1822 print_hex_dump(KERN_ERR,
1823 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1824 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1825 #endif
1826
1827 /* xts_ablkcipher_decrypt shared descriptor */
1828 desc = ctx->sh_desc_dec;
1829
1830 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1831 /* Skip if already shared */
1832 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1833 JUMP_COND_SHRD);
1834
1835 /* Load class1 key only */
1836 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1837 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1838
1839 /* Load sector size with index 40 bytes (0x28) */
1840 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1841 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1842 append_data(desc, (void *)&sector_size, 8);
1843
1844 set_jump_tgt_here(desc, key_jump_cmd);
1845
1846 /*
1847 * create sequence for loading the sector index
1848 * Upper 8B of IV - will be used as sector index
1849 * Lower 8B of IV - will be discarded
1850 */
1851 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1852 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1853 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1854
1855 /* Load operation */
1856 append_dec_op1(desc, ctx->class1_alg_type);
1857
1858 /* Perform operation */
1859 ablkcipher_append_src_dst(desc);
1860
1861 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1862 DMA_TO_DEVICE);
1863 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1864 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1865 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1866 dev_err(jrdev, "unable to map shared descriptor\n");
1867 return -ENOMEM;
1868 }
1869 #ifdef DEBUG
1870 print_hex_dump(KERN_ERR,
1871 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1872 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1873 #endif
1874
1875 return 0;
1876 }
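/*
 * Illustrative caller-side sketch, not part of the driver: keying
 * "xts(aes)" through the (old) ablkcipher API. The key buffer is a
 * hypothetical example; XTS keys are two concatenated AES keys, which
 * is why only 32- and 64-byte keylens pass the check above.
 */
#if 0
	struct crypto_ablkcipher *tfm;
	u8 xts_key[2 * AES_MAX_KEY_SIZE];	/* key1 || key2 */

	tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
	if (!IS_ERR(tfm))
		crypto_ablkcipher_setkey(tfm, xts_key, sizeof(xts_key));
#endif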
1877
1878 /*
1879 * aead_edesc - s/w-extended aead descriptor
1880 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1881 * @src_nents: number of segments in input scatterlist
1882 * @dst_nents: number of segments in output scatterlist
1883 * @iv_dma: dma address of iv for checking continuity and link table
1884 * @sec4_sg: pointer to the h/w link table
1885 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1886 * @sec4_sg_dma: bus physical mapped address of h/w link table
1887 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1888 */
1889 struct aead_edesc {
1890 int assoc_nents;
1891 int src_nents;
1892 int dst_nents;
1893 dma_addr_t iv_dma;
1894 int sec4_sg_bytes;
1895 dma_addr_t sec4_sg_dma;
1896 struct sec4_sg_entry *sec4_sg;
1897 u32 hw_desc[];
1898 };
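/*
 * A sketch of the single kzalloc'ed block behind this struct; the
 * offsets are inferred from aead_edesc_alloc() below:
 *
 *   +--------------------------+  (void *)edesc
 *   | struct aead_edesc        |
 *   +--------------------------+  edesc->hw_desc
 *   | h/w job descriptor       |  desc_bytes
 *   +--------------------------+  edesc->sec4_sg
 *   | sec4 S/G link table      |  sec4_sg_bytes
 *   +--------------------------+
 */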
1899
1900 /*
1901 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1902 * @src_nents: number of segments in input scatterlist
1903 * @dst_nents: number of segments in output scatterlist
1904 * @iv_dma: dma address of iv for checking continuity and link table
1905 * @sec4_sg: pointer to the h/w link table
1906 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1907 * @sec4_sg_dma: bus physical mapped address of h/w link table
1908 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1909 */
1910 struct ablkcipher_edesc {
1911 int src_nents;
1912 int dst_nents;
1913 dma_addr_t iv_dma;
1914 int sec4_sg_bytes;
1915 dma_addr_t sec4_sg_dma;
1916 struct sec4_sg_entry *sec4_sg;
1917 u32 hw_desc[];
1918 };
1919
1920 static void caam_unmap(struct device *dev, struct scatterlist *src,
1921 struct scatterlist *dst, int src_nents,
1922 int dst_nents,
1923 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1924 int sec4_sg_bytes)
1925 {
1926 if (dst != src) {
1927 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1928 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1929 } else {
1930 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1931 }
1932
1933 if (iv_dma)
1934 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1935 if (sec4_sg_bytes)
1936 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1937 DMA_TO_DEVICE);
1938 }
1939
1940 static void aead_unmap(struct device *dev,
1941 struct aead_edesc *edesc,
1942 struct aead_request *req)
1943 {
1944 caam_unmap(dev, req->src, req->dst,
1945 edesc->src_nents, edesc->dst_nents, 0, 0,
1946 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1947 }
1948
1949 static void ablkcipher_unmap(struct device *dev,
1950 struct ablkcipher_edesc *edesc,
1951 struct ablkcipher_request *req)
1952 {
1953 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1954 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1955
1956 caam_unmap(dev, req->src, req->dst,
1957 edesc->src_nents, edesc->dst_nents,
1958 edesc->iv_dma, ivsize,
1959 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1960 }
1961
1962 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1963 void *context)
1964 {
1965 struct aead_request *req = context;
1966 struct aead_edesc *edesc;
1967
1968 #ifdef DEBUG
1969 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1970 #endif
1971
1972 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1973
1974 if (err)
1975 caam_jr_strstatus(jrdev, err);
1976
1977 aead_unmap(jrdev, edesc, req);
1978
1979 kfree(edesc);
1980
1981 aead_request_complete(req, err);
1982 }
1983
1984 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1985 void *context)
1986 {
1987 struct aead_request *req = context;
1988 struct aead_edesc *edesc;
1989
1990 #ifdef DEBUG
1991 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1992 #endif
1993
1994 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1995
1996 if (err)
1997 caam_jr_strstatus(jrdev, err);
1998
1999 aead_unmap(jrdev, edesc, req);
2000
2001 /*
2002 * Verify that the h/w auth check passed; if not, report -EBADMSG.
2003 */
2004 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2005 err = -EBADMSG;
2006
2007 kfree(edesc);
2008
2009 aead_request_complete(req, err);
2010 }
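/*
 * Illustrative sketch, not driver code: how a caller is expected to
 * treat the status delivered by this completion. -EBADMSG (set above on
 * an ICV mismatch) means authentication failed, not a transport error.
 */
#if 0
	switch (err) {
	case 0:
		/* plaintext is authentic and ready for use */
		break;
	case -EBADMSG:
		/* ICV check failed - discard the message */
		break;
	default:
		/* CCB/DECO/job-ring error, already logged via strstatus */
		break;
	}
#endif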
2011
2012 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2013 void *context)
2014 {
2015 struct ablkcipher_request *req = context;
2016 struct ablkcipher_edesc *edesc;
2017 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2018 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2019
2020 #ifdef DEBUG
2021 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2022 #endif
2023
2024 edesc = (struct ablkcipher_edesc *)((char *)desc -
2025 offsetof(struct ablkcipher_edesc, hw_desc));
2026
2027 if (err)
2028 caam_jr_strstatus(jrdev, err);
2029
2030 #ifdef DEBUG
2031 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
2032 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2033 edesc->src_nents > 1 ? 100 : ivsize, 1);
2034 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2035 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
2036 edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
2037 #endif
2038
2039 ablkcipher_unmap(jrdev, edesc, req);
2040
2041 /*
2042 * The crypto API expects us to set the IV (req->info) to the last
2043 * ciphertext block. This is used e.g. by the CTS mode.
2044 */
2045 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
2046 ivsize, 0);
2047
2048 kfree(edesc);
2049
2050 ablkcipher_request_complete(req, err);
2051 }
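/*
 * Illustrative sketch, not driver code: because the completion above
 * leaves the last ciphertext block in req->info, a caller can chain a
 * follow-up CBC request without touching the IV. next_src, next_dst and
 * next_len are hypothetical.
 */
#if 0
	ablkcipher_request_set_crypt(req, next_src, next_dst, next_len,
				     req->info);
	crypto_ablkcipher_encrypt(req);
#endif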
2052
2053 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2054 void *context)
2055 {
2056 struct ablkcipher_request *req = context;
2057 struct ablkcipher_edesc *edesc;
2058 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2059 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2060
2061 #ifdef DEBUG
2062 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2063 #endif
2064
2065 edesc = (struct ablkcipher_edesc *)((char *)desc -
2066 offsetof(struct ablkcipher_edesc, hw_desc));
2067 if (err)
2068 caam_jr_strstatus(jrdev, err);
2069
2070 #ifdef DEBUG
2071 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
2072 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2073 ivsize, 1);
2074 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2075 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
2076 edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
2077 #endif
2078
2079 ablkcipher_unmap(jrdev, edesc, req);
2080
2081 /*
2082 * The crypto API expects us to set the IV (req->info) to the last
2083 * ciphertext block.
2084 */
2085 scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
2086 ivsize, 0);
2087
2088 kfree(edesc);
2089
2090 ablkcipher_request_complete(req, err);
2091 }
2092
2093 /*
2094 * Fill in aead job descriptor
2095 */
2096 static void init_aead_job(struct aead_request *req,
2097 struct aead_edesc *edesc,
2098 bool all_contig, bool encrypt)
2099 {
2100 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2101 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2102 int authsize = ctx->authsize;
2103 u32 *desc = edesc->hw_desc;
2104 u32 out_options, in_options;
2105 dma_addr_t dst_dma, src_dma;
2106 int len, sec4_sg_index = 0;
2107 dma_addr_t ptr;
2108 u32 *sh_desc;
2109
2110 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2111 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2112
2113 len = desc_len(sh_desc);
2114 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2115
2116 if (all_contig) {
2117 src_dma = sg_dma_address(req->src);
2118 in_options = 0;
2119 } else {
2120 src_dma = edesc->sec4_sg_dma;
2121 sec4_sg_index += edesc->src_nents;
2122 in_options = LDST_SGF;
2123 }
2124
2125 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2126 in_options);
2127
2128 dst_dma = src_dma;
2129 out_options = in_options;
2130
2131 if (unlikely(req->src != req->dst)) {
2132 if (!edesc->dst_nents) {
2133 dst_dma = sg_dma_address(req->dst);
2134 } else {
2135 dst_dma = edesc->sec4_sg_dma +
2136 sec4_sg_index *
2137 sizeof(struct sec4_sg_entry);
2138 out_options = LDST_SGF;
2139 }
2140 }
2141
2142 if (encrypt)
2143 append_seq_out_ptr(desc, dst_dma,
2144 req->assoclen + req->cryptlen + authsize,
2145 out_options);
2146 else
2147 append_seq_out_ptr(desc, dst_dma,
2148 req->assoclen + req->cryptlen - authsize,
2149 out_options);
2150
2151 /* REG3 = assoclen */
2152 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2153 }
2154
2155 static void init_gcm_job(struct aead_request *req,
2156 struct aead_edesc *edesc,
2157 bool all_contig, bool encrypt)
2158 {
2159 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2160 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2161 unsigned int ivsize = crypto_aead_ivsize(aead);
2162 u32 *desc = edesc->hw_desc;
2163 bool generic_gcm = (ivsize == 12);
2164 unsigned int last;
2165
2166 init_aead_job(req, edesc, all_contig, encrypt);
2167
2168 /* BUG This should not be specific to generic GCM. */
2169 last = 0;
2170 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2171 last = FIFOLD_TYPE_LAST1;
2172
2173 /* Read GCM IV */
2174 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2175 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2176 /* Append Salt */
2177 if (!generic_gcm)
2178 append_data(desc, ctx->key + ctx->enckeylen, 4);
2179 /* Append IV */
2180 append_data(desc, req->iv, ivsize);
2181 /* End of blank commands */
2182 }
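/*
 * Illustrative sketch (names as in init_gcm_job() above, not compiled):
 * the 12 IV bytes the FIFO LOAD built above actually carries.
 */
#if 0
	u8 fifo_iv[12];

	if (generic_gcm) {
		memcpy(fifo_iv, req->iv, 12);		/* gcm(aes) */
	} else {
		/* rfc4106: 4-byte salt stored after the AES key at setkey */
		memcpy(fifo_iv, ctx->key + ctx->enckeylen, 4);
		memcpy(fifo_iv + 4, req->iv, 8);
	}
#endif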
2183
2184 static void init_authenc_job(struct aead_request *req,
2185 struct aead_edesc *edesc,
2186 bool all_contig, bool encrypt)
2187 {
2188 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2189 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2190 struct caam_aead_alg, aead);
2191 unsigned int ivsize = crypto_aead_ivsize(aead);
2192 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2193 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2194 OP_ALG_AAI_CTR_MOD128);
2195 const bool is_rfc3686 = alg->caam.rfc3686;
2196 u32 *desc = edesc->hw_desc;
2197 u32 ivoffset = 0;
2198
2199 /*
2200 * AES-CTR needs to load IV in CONTEXT1 reg
2201 * at an offset of 128bits (16bytes)
2202 * CONTEXT1[255:128] = IV
2203 */
2204 if (ctr_mode)
2205 ivoffset = 16;
2206
2207 /*
2208 * RFC3686 specific:
2209 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2210 */
2211 if (is_rfc3686)
2212 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2213
2214 init_aead_job(req, edesc, all_contig, encrypt);
2215
2216 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2217 append_load_as_imm(desc, req->iv, ivsize,
2218 LDST_CLASS_1_CCB |
2219 LDST_SRCDST_BYTE_CONTEXT |
2220 (ivoffset << LDST_OFFSET_SHIFT));
2221 }
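/*
 * Reading aid for the condition above, inferred from this file only:
 * the job descriptor loads req->iv itself unless the shared descriptor
 * already produces the IV.
 *
 *   rfc3686 encrypt             -> IV loaded here
 *   non-geniv algorithms        -> IV loaded here
 *   geniv (echainiv), otherwise -> IV handled by the shared descriptor
 */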
2222
2223 /*
2224 * Fill in ablkcipher job descriptor
2225 */
2226 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2227 struct ablkcipher_edesc *edesc,
2228 struct ablkcipher_request *req,
2229 bool iv_contig)
2230 {
2231 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2232 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2233 u32 *desc = edesc->hw_desc;
2234 u32 out_options = 0, in_options;
2235 dma_addr_t dst_dma, src_dma;
2236 int len, sec4_sg_index = 0;
2237
2238 #ifdef DEBUG
2239 bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2240 CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2241 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2242 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2243 ivsize, 1);
2244 printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
2245 dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
2246 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2247 edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
2248 #endif
2249
2250 len = desc_len(sh_desc);
2251 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2252
2253 if (iv_contig) {
2254 src_dma = edesc->iv_dma;
2255 in_options = 0;
2256 } else {
2257 src_dma = edesc->sec4_sg_dma;
2258 sec4_sg_index += edesc->src_nents + 1;
2259 in_options = LDST_SGF;
2260 }
2261 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2262
2263 if (likely(req->src == req->dst)) {
2264 if (!edesc->src_nents && iv_contig) {
2265 dst_dma = sg_dma_address(req->src);
2266 } else {
2267 dst_dma = edesc->sec4_sg_dma +
2268 sizeof(struct sec4_sg_entry);
2269 out_options = LDST_SGF;
2270 }
2271 } else {
2272 if (!edesc->dst_nents) {
2273 dst_dma = sg_dma_address(req->dst);
2274 } else {
2275 dst_dma = edesc->sec4_sg_dma +
2276 sec4_sg_index * sizeof(struct sec4_sg_entry);
2277 out_options = LDST_SGF;
2278 }
2279 }
2280 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2281 }
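/*
 * A sketch of the sec4 S/G table the sequence pointers above reference
 * when the IV is not contiguous (layout per ablkcipher_edesc_alloc()):
 *
 *   entry 0               : IV (ivsize bytes)
 *   entries 1..src_nents  : source segments, last one marked final
 *   following entries     : destination segments (only if dst != src)
 *
 * SEQ IN PTR points at entry 0 with LDST_SGF set and covers
 * req->nbytes + ivsize bytes in total.
 */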
2282
2283 /*
2284 * Fill in ablkcipher givencrypt job descriptor
2285 */
2286 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2287 struct ablkcipher_edesc *edesc,
2288 struct ablkcipher_request *req,
2289 bool iv_contig)
2290 {
2291 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2292 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2293 u32 *desc = edesc->hw_desc;
2294 u32 out_options, in_options;
2295 dma_addr_t dst_dma, src_dma;
2296 int len, sec4_sg_index = 0;
2297
2298 #ifdef DEBUG
2299 bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2300 CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2301 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2302 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2303 ivsize, 1);
2304 dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2305 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2306 edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
2307 #endif
2308
2309 len = desc_len(sh_desc);
2310 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2311
2312 if (!edesc->src_nents) {
2313 src_dma = sg_dma_address(req->src);
2314 in_options = 0;
2315 } else {
2316 src_dma = edesc->sec4_sg_dma;
2317 sec4_sg_index += edesc->src_nents;
2318 in_options = LDST_SGF;
2319 }
2320 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2321
2322 if (iv_contig) {
2323 dst_dma = edesc->iv_dma;
2324 out_options = 0;
2325 } else {
2326 dst_dma = edesc->sec4_sg_dma +
2327 sec4_sg_index * sizeof(struct sec4_sg_entry);
2328 out_options = LDST_SGF;
2329 }
2330 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2331 }
2332
2333 /*
2334 * allocate and map the aead extended descriptor
2335 */
2336 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2337 int desc_bytes, bool *all_contig_ptr,
2338 bool encrypt)
2339 {
2340 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2341 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2342 struct device *jrdev = ctx->jrdev;
2343 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2344 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2345 int src_nents, dst_nents = 0;
2346 struct aead_edesc *edesc;
2347 int sgc;
2348 bool all_contig = true;
2349 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2350 unsigned int authsize = ctx->authsize;
2351
2352 if (unlikely(req->dst != req->src)) {
2353 src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2354 dst_nents = sg_count(req->dst,
2355 req->assoclen + req->cryptlen +
2356 (encrypt ? authsize : (-authsize)));
2357 } else {
2358 src_nents = sg_count(req->src,
2359 req->assoclen + req->cryptlen +
2360 (encrypt ? authsize : 0));
2361 }
2362
2363 /* Check if data are contiguous. */
2364 all_contig = !src_nents;
2365 if (!all_contig) {
2366 src_nents = src_nents ? : 1;
2367 sec4_sg_len = src_nents;
2368 }
2369
2370 sec4_sg_len += dst_nents;
2371
2372 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2373
2374 /* allocate space for base edesc and hw desc commands, link tables */
2375 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2376 GFP_DMA | flags);
2377 if (!edesc) {
2378 dev_err(jrdev, "could not allocate extended descriptor\n");
2379 return ERR_PTR(-ENOMEM);
2380 }
2381
2382 if (likely(req->src == req->dst)) {
2383 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2384 DMA_BIDIRECTIONAL);
2385 if (unlikely(!sgc)) {
2386 dev_err(jrdev, "unable to map source\n");
2387 kfree(edesc);
2388 return ERR_PTR(-ENOMEM);
2389 }
2390 } else {
2391 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2392 DMA_TO_DEVICE);
2393 if (unlikely(!sgc)) {
2394 dev_err(jrdev, "unable to map source\n");
2395 kfree(edesc);
2396 return ERR_PTR(-ENOMEM);
2397 }
2398
2399 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2400 DMA_FROM_DEVICE);
2401 if (unlikely(!sgc)) {
2402 dev_err(jrdev, "unable to map destination\n");
2403 dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2404 DMA_TO_DEVICE);
2405 kfree(edesc);
2406 return ERR_PTR(-ENOMEM);
2407 }
2408 }
2409
2410 edesc->src_nents = src_nents;
2411 edesc->dst_nents = dst_nents;
2412 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2413 desc_bytes;
2414 *all_contig_ptr = all_contig;
2415
2416 sec4_sg_index = 0;
2417 if (!all_contig) {
2418 sg_to_sec4_sg_last(req->src, src_nents,
2419 edesc->sec4_sg + sec4_sg_index, 0);
2420 sec4_sg_index += src_nents;
2421 }
2422 if (dst_nents) {
2423 sg_to_sec4_sg_last(req->dst, dst_nents,
2424 edesc->sec4_sg + sec4_sg_index, 0);
2425 }
2426
2427 if (!sec4_sg_bytes)
2428 return edesc;
2429
2430 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2431 sec4_sg_bytes, DMA_TO_DEVICE);
2432 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2433 dev_err(jrdev, "unable to map S/G table\n");
2434 aead_unmap(jrdev, edesc, req);
2435 kfree(edesc);
2436 return ERR_PTR(-ENOMEM);
2437 }
2438
2439 edesc->sec4_sg_bytes = sec4_sg_bytes;
2440
2441 return edesc;
2442 }
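/*
 * Illustrative sketch of the length accounting above, with made-up
 * numbers: a 16-byte assoc, 64-byte payload and 16-byte ICV, encrypted
 * into a distinct destination buffer.
 */
#if 0
	unsigned int src_len = req->assoclen + req->cryptlen;	/* 80 */
	unsigned int dst_len = src_len + authsize;		/* 96 */
#endif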
2443
2444 static int gcm_encrypt(struct aead_request *req)
2445 {
2446 struct aead_edesc *edesc;
2447 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2448 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2449 struct device *jrdev = ctx->jrdev;
2450 bool all_contig;
2451 u32 *desc;
2452 int ret = 0;
2453
2454 /* allocate extended descriptor */
2455 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2456 if (IS_ERR(edesc))
2457 return PTR_ERR(edesc);
2458
2459 /* Create and submit job descriptor */
2460 init_gcm_job(req, edesc, all_contig, true);
2461 #ifdef DEBUG
2462 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2463 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2464 desc_bytes(edesc->hw_desc), 1);
2465 #endif
2466
2467 desc = edesc->hw_desc;
2468 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2469 if (!ret) {
2470 ret = -EINPROGRESS;
2471 } else {
2472 aead_unmap(jrdev, edesc, req);
2473 kfree(edesc);
2474 }
2475
2476 return ret;
2477 }
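/*
 * Illustrative caller-side sketch, not driver code: driving this path
 * through the AEAD API. -EINPROGRESS from the enqueue above reaches the
 * caller, which waits for the completion callback. my_gcm_done is a
 * hypothetical callback that calls complete() on &done.
 */
#if 0
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *r = aead_request_alloc(tfm, GFP_KERNEL);

	aead_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  my_gcm_done, &done);
	aead_request_set_ad(r, assoclen);
	aead_request_set_crypt(r, sg_src, sg_dst, cryptlen, iv);
	if (crypto_aead_encrypt(r) == -EINPROGRESS)
		wait_for_completion(&done);
#endif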
2478
2479 static int ipsec_gcm_encrypt(struct aead_request *req)
2480 {
2481 if (req->assoclen < 8)
2482 return -EINVAL;
2483
2484 return gcm_encrypt(req);
2485 }
2486
2487 static int aead_encrypt(struct aead_request *req)
2488 {
2489 struct aead_edesc *edesc;
2490 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2491 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2492 struct device *jrdev = ctx->jrdev;
2493 bool all_contig;
2494 u32 *desc;
2495 int ret = 0;
2496
2497 /* allocate extended descriptor */
2498 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2499 &all_contig, true);
2500 if (IS_ERR(edesc))
2501 return PTR_ERR(edesc);
2502
2503 /* Create and submit job descriptor */
2504 init_authenc_job(req, edesc, all_contig, true);
2505 #ifdef DEBUG
2506 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2507 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2508 desc_bytes(edesc->hw_desc), 1);
2509 #endif
2510
2511 desc = edesc->hw_desc;
2512 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2513 if (!ret) {
2514 ret = -EINPROGRESS;
2515 } else {
2516 aead_unmap(jrdev, edesc, req);
2517 kfree(edesc);
2518 }
2519
2520 return ret;
2521 }
2522
2523 static int gcm_decrypt(struct aead_request *req)
2524 {
2525 struct aead_edesc *edesc;
2526 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2527 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2528 struct device *jrdev = ctx->jrdev;
2529 bool all_contig;
2530 u32 *desc;
2531 int ret = 0;
2532
2533 /* allocate extended descriptor */
2534 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2535 if (IS_ERR(edesc))
2536 return PTR_ERR(edesc);
2537
2538 /* Create and submit job descriptor */
2539 init_gcm_job(req, edesc, all_contig, false);
2540 #ifdef DEBUG
2541 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2542 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2543 desc_bytes(edesc->hw_desc), 1);
2544 #endif
2545
2546 desc = edesc->hw_desc;
2547 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2548 if (!ret) {
2549 ret = -EINPROGRESS;
2550 } else {
2551 aead_unmap(jrdev, edesc, req);
2552 kfree(edesc);
2553 }
2554
2555 return ret;
2556 }
2557
2558 static int ipsec_gcm_decrypt(struct aead_request *req)
2559 {
2560 if (req->assoclen < 8)
2561 return -EINVAL;
2562
2563 return gcm_decrypt(req);
2564 }
2565
2566 static int aead_decrypt(struct aead_request *req)
2567 {
2568 struct aead_edesc *edesc;
2569 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2570 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2571 struct device *jrdev = ctx->jrdev;
2572 bool all_contig;
2573 u32 *desc;
2574 int ret = 0;
2575
2576 #ifdef DEBUG
2577 bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2578 CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2579 dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2580 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2581 req->assoclen + req->cryptlen, 1, may_sleep);
2582 #endif
2583
2584 /* allocate extended descriptor */
2585 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2586 &all_contig, false);
2587 if (IS_ERR(edesc))
2588 return PTR_ERR(edesc);
2589
2590 /* Create and submit job descriptor */
2591 init_authenc_job(req, edesc, all_contig, false);
2592 #ifdef DEBUG
2593 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2594 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2595 desc_bytes(edesc->hw_desc), 1);
2596 #endif
2597
2598 desc = edesc->hw_desc;
2599 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2600 if (!ret) {
2601 ret = -EINPROGRESS;
2602 } else {
2603 aead_unmap(jrdev, edesc, req);
2604 kfree(edesc);
2605 }
2606
2607 return ret;
2608 }
2609
2610 /*
2611 * allocate and map the ablkcipher extended descriptor
2612 */
2613 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2614 *req, int desc_bytes,
2615 bool *iv_contig_out)
2616 {
2617 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2618 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2619 struct device *jrdev = ctx->jrdev;
2620 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
2621 GFP_KERNEL : GFP_ATOMIC;
2622 int src_nents, dst_nents = 0, sec4_sg_bytes;
2623 struct ablkcipher_edesc *edesc;
2624 dma_addr_t iv_dma = 0;
2625 bool iv_contig = false;
2626 int sgc;
2627 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2628 int sec4_sg_index;
2629
2630 src_nents = sg_count(req->src, req->nbytes);
2631
2632 if (req->dst != req->src)
2633 dst_nents = sg_count(req->dst, req->nbytes);
2634
2635 if (likely(req->src == req->dst)) {
2636 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2637 DMA_BIDIRECTIONAL);
2638 } else {
2639 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2640 DMA_TO_DEVICE);
2641 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2642 DMA_FROM_DEVICE);
2643 }
2644
2645 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2646 if (dma_mapping_error(jrdev, iv_dma)) {
2647 dev_err(jrdev, "unable to map IV\n");
2648 return ERR_PTR(-ENOMEM);
2649 }
2650
2651 /*
2652 * Check if iv can be contiguous with source and destination.
2653 * If so, include it. If not, create scatterlist.
2654 */
2655 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2656 iv_contig = true;
2657 else
2658 src_nents = src_nents ? : 1;
2659 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2660 sizeof(struct sec4_sg_entry);
2661
2662 /* allocate space for base edesc and hw desc commands, link tables */
2663 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2664 GFP_DMA | flags);
2665 if (!edesc) {
2666 dev_err(jrdev, "could not allocate extended descriptor\n");
2667 return ERR_PTR(-ENOMEM);
2668 }
2669
2670 edesc->src_nents = src_nents;
2671 edesc->dst_nents = dst_nents;
2672 edesc->sec4_sg_bytes = sec4_sg_bytes;
2673 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2674 desc_bytes;
2675
2676 sec4_sg_index = 0;
2677 if (!iv_contig) {
2678 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2679 sg_to_sec4_sg_last(req->src, src_nents,
2680 edesc->sec4_sg + 1, 0);
2681 sec4_sg_index += 1 + src_nents;
2682 }
2683
2684 if (dst_nents) {
2685 sg_to_sec4_sg_last(req->dst, dst_nents,
2686 edesc->sec4_sg + sec4_sg_index, 0);
2687 }
2688
2689 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2690 sec4_sg_bytes, DMA_TO_DEVICE);
2691 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2692 dev_err(jrdev, "unable to map S/G table\n");
2693 return ERR_PTR(-ENOMEM);
2694 }
2695
2696 edesc->iv_dma = iv_dma;
2697
2698 #ifdef DEBUG
2699 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2700 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2701 sec4_sg_bytes, 1);
2702 #endif
2703
2704 *iv_contig_out = iv_contig;
2705 return edesc;
2706 }
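/*
 * Illustrative restatement of the contiguity test above (names as in
 * this function): the IV can be folded into the input sequence only
 * when its DMA mapping ends exactly where a single-segment source
 * begins, i.e.
 *
 *   !src_nents && iv_dma + ivsize == sg_dma_address(req->src)
 *
 * Otherwise the IV gets its own sec4 S/G entry ahead of the data.
 */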
2707
2708 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2709 {
2710 struct ablkcipher_edesc *edesc;
2711 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2712 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2713 struct device *jrdev = ctx->jrdev;
2714 bool iv_contig;
2715 u32 *desc;
2716 int ret = 0;
2717
2718 /* allocate extended descriptor */
2719 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2720 CAAM_CMD_SZ, &iv_contig);
2721 if (IS_ERR(edesc))
2722 return PTR_ERR(edesc);
2723
2724 /* Create and submit job descriptor */
2725 init_ablkcipher_job(ctx->sh_desc_enc,
2726 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2727 #ifdef DEBUG
2728 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2729 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2730 desc_bytes(edesc->hw_desc), 1);
2731 #endif
2732 desc = edesc->hw_desc;
2733 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2734
2735 if (!ret) {
2736 ret = -EINPROGRESS;
2737 } else {
2738 ablkcipher_unmap(jrdev, edesc, req);
2739 kfree(edesc);
2740 }
2741
2742 return ret;
2743 }
2744
2745 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2746 {
2747 struct ablkcipher_edesc *edesc;
2748 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2749 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2750 struct device *jrdev = ctx->jrdev;
2751 bool iv_contig;
2752 u32 *desc;
2753 int ret = 0;
2754
2755 /* allocate extended descriptor */
2756 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2757 CAAM_CMD_SZ, &iv_contig);
2758 if (IS_ERR(edesc))
2759 return PTR_ERR(edesc);
2760
2761 /* Create and submit job descriptor */
2762 init_ablkcipher_job(ctx->sh_desc_dec,
2763 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2764 desc = edesc->hw_desc;
2765 #ifdef DEBUG
2766 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2767 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2768 desc_bytes(edesc->hw_desc), 1);
2769 #endif
2770
2771 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2772 if (!ret) {
2773 ret = -EINPROGRESS;
2774 } else {
2775 ablkcipher_unmap(jrdev, edesc, req);
2776 kfree(edesc);
2777 }
2778
2779 return ret;
2780 }
2781
2782 /*
2783 * allocate and map the ablkcipher extended descriptor
2784 * for ablkcipher givencrypt
2785 */
2786 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2787 struct skcipher_givcrypt_request *greq,
2788 int desc_bytes,
2789 bool *iv_contig_out)
2790 {
2791 struct ablkcipher_request *req = &greq->creq;
2792 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2793 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2794 struct device *jrdev = ctx->jrdev;
2795 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2796 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2797 GFP_KERNEL : GFP_ATOMIC;
2798 int src_nents, dst_nents = 0, sec4_sg_bytes;
2799 struct ablkcipher_edesc *edesc;
2800 dma_addr_t iv_dma = 0;
2801 bool iv_contig = false;
2802 int sgc;
2803 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2804 int sec4_sg_index;
2805
2806 src_nents = sg_count(req->src, req->nbytes);
2807
2808 if (unlikely(req->dst != req->src))
2809 dst_nents = sg_count(req->dst, req->nbytes);
2810
2811 if (likely(req->src == req->dst)) {
2812 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2813 DMA_BIDIRECTIONAL);
2814 } else {
2815 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2816 DMA_TO_DEVICE);
2817 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2818 DMA_FROM_DEVICE);
2819 }
2820
2821 /*
2822 * Check if iv can be contiguous with source and destination.
2823 * If so, include it. If not, create scatterlist.
2824 */
2825 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2826 if (dma_mapping_error(jrdev, iv_dma)) {
2827 dev_err(jrdev, "unable to map IV\n");
2828 return ERR_PTR(-ENOMEM);
2829 }
2830
2831 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2832 iv_contig = true;
2833 else
2834 dst_nents = dst_nents ? : 1;
2835 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2836 sizeof(struct sec4_sg_entry);
2837
2838 /* allocate space for base edesc and hw desc commands, link tables */
2839 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2840 GFP_DMA | flags);
2841 if (!edesc) {
2842 dev_err(jrdev, "could not allocate extended descriptor\n");
2843 return ERR_PTR(-ENOMEM);
2844 }
2845
2846 edesc->src_nents = src_nents;
2847 edesc->dst_nents = dst_nents;
2848 edesc->sec4_sg_bytes = sec4_sg_bytes;
2849 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2850 desc_bytes;
2851
2852 sec4_sg_index = 0;
2853 if (src_nents) {
2854 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2855 sec4_sg_index += src_nents;
2856 }
2857
2858 if (!iv_contig) {
2859 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2860 iv_dma, ivsize, 0);
2861 sec4_sg_index += 1;
2862 sg_to_sec4_sg_last(req->dst, dst_nents,
2863 edesc->sec4_sg + sec4_sg_index, 0);
2864 }
2865
2866 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2867 sec4_sg_bytes, DMA_TO_DEVICE);
2868 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2869 dev_err(jrdev, "unable to map S/G table\n");
2870 return ERR_PTR(-ENOMEM);
2871 }
2872 edesc->iv_dma = iv_dma;
2873
2874 #ifdef DEBUG
2875 print_hex_dump(KERN_ERR,
2876 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2877 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2878 sec4_sg_bytes, 1);
2879 #endif
2880
2881 *iv_contig_out = iv_contig;
2882 return edesc;
2883 }
2884
2885 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2886 {
2887 struct ablkcipher_request *req = &creq->creq;
2888 struct ablkcipher_edesc *edesc;
2889 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2890 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2891 struct device *jrdev = ctx->jrdev;
2892 bool iv_contig;
2893 u32 *desc;
2894 int ret = 0;
2895
2896 /* allocate extended descriptor */
2897 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2898 CAAM_CMD_SZ, &iv_contig);
2899 if (IS_ERR(edesc))
2900 return PTR_ERR(edesc);
2901
2902 /* Create and submit job descriptor */
2903 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2904 edesc, req, iv_contig);
2905 #ifdef DEBUG
2906 print_hex_dump(KERN_ERR,
2907 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2908 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2909 desc_bytes(edesc->hw_desc), 1);
2910 #endif
2911 desc = edesc->hw_desc;
2912 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2913
2914 if (!ret) {
2915 ret = -EINPROGRESS;
2916 } else {
2917 ablkcipher_unmap(jrdev, edesc, req);
2918 kfree(edesc);
2919 }
2920
2921 return ret;
2922 }
2923
2924 #define template_aead template_u.aead
2925 #define template_ablkcipher template_u.ablkcipher
2926 struct caam_alg_template {
2927 char name[CRYPTO_MAX_ALG_NAME];
2928 char driver_name[CRYPTO_MAX_ALG_NAME];
2929 unsigned int blocksize;
2930 u32 type;
2931 union {
2932 struct ablkcipher_alg ablkcipher;
2933 } template_u;
2934 u32 class1_alg_type;
2935 u32 class2_alg_type;
2936 u32 alg_op;
2937 };
2938
2939 static struct caam_alg_template driver_algs[] = {
2940 /* ablkcipher descriptor */
2941 {
2942 .name = "cbc(aes)",
2943 .driver_name = "cbc-aes-caam",
2944 .blocksize = AES_BLOCK_SIZE,
2945 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2946 .template_ablkcipher = {
2947 .setkey = ablkcipher_setkey,
2948 .encrypt = ablkcipher_encrypt,
2949 .decrypt = ablkcipher_decrypt,
2950 .givencrypt = ablkcipher_givencrypt,
2951 .geniv = "<built-in>",
2952 .min_keysize = AES_MIN_KEY_SIZE,
2953 .max_keysize = AES_MAX_KEY_SIZE,
2954 .ivsize = AES_BLOCK_SIZE,
2955 },
2956 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2957 },
2958 {
2959 .name = "cbc(des3_ede)",
2960 .driver_name = "cbc-3des-caam",
2961 .blocksize = DES3_EDE_BLOCK_SIZE,
2962 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2963 .template_ablkcipher = {
2964 .setkey = ablkcipher_setkey,
2965 .encrypt = ablkcipher_encrypt,
2966 .decrypt = ablkcipher_decrypt,
2967 .givencrypt = ablkcipher_givencrypt,
2968 .geniv = "<built-in>",
2969 .min_keysize = DES3_EDE_KEY_SIZE,
2970 .max_keysize = DES3_EDE_KEY_SIZE,
2971 .ivsize = DES3_EDE_BLOCK_SIZE,
2972 },
2973 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2974 },
2975 {
2976 .name = "cbc(des)",
2977 .driver_name = "cbc-des-caam",
2978 .blocksize = DES_BLOCK_SIZE,
2979 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2980 .template_ablkcipher = {
2981 .setkey = ablkcipher_setkey,
2982 .encrypt = ablkcipher_encrypt,
2983 .decrypt = ablkcipher_decrypt,
2984 .givencrypt = ablkcipher_givencrypt,
2985 .geniv = "<built-in>",
2986 .min_keysize = DES_KEY_SIZE,
2987 .max_keysize = DES_KEY_SIZE,
2988 .ivsize = DES_BLOCK_SIZE,
2989 },
2990 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2991 },
2992 {
2993 .name = "ctr(aes)",
2994 .driver_name = "ctr-aes-caam",
2995 .blocksize = 1,
2996 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2997 .template_ablkcipher = {
2998 .setkey = ablkcipher_setkey,
2999 .encrypt = ablkcipher_encrypt,
3000 .decrypt = ablkcipher_decrypt,
3001 .geniv = "chainiv",
3002 .min_keysize = AES_MIN_KEY_SIZE,
3003 .max_keysize = AES_MAX_KEY_SIZE,
3004 .ivsize = AES_BLOCK_SIZE,
3005 },
3006 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3007 },
3008 {
3009 .name = "rfc3686(ctr(aes))",
3010 .driver_name = "rfc3686-ctr-aes-caam",
3011 .blocksize = 1,
3012 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
3013 .template_ablkcipher = {
3014 .setkey = ablkcipher_setkey,
3015 .encrypt = ablkcipher_encrypt,
3016 .decrypt = ablkcipher_decrypt,
3017 .givencrypt = ablkcipher_givencrypt,
3018 .geniv = "<built-in>",
3019 .min_keysize = AES_MIN_KEY_SIZE +
3020 CTR_RFC3686_NONCE_SIZE,
3021 .max_keysize = AES_MAX_KEY_SIZE +
3022 CTR_RFC3686_NONCE_SIZE,
3023 .ivsize = CTR_RFC3686_IV_SIZE,
3024 },
3025 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3026 },
3027 {
3028 .name = "xts(aes)",
3029 .driver_name = "xts-aes-caam",
3030 .blocksize = AES_BLOCK_SIZE,
3031 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3032 .template_ablkcipher = {
3033 .setkey = xts_ablkcipher_setkey,
3034 .encrypt = ablkcipher_encrypt,
3035 .decrypt = ablkcipher_decrypt,
3036 .geniv = "eseqiv",
3037 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3038 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3039 .ivsize = AES_BLOCK_SIZE,
3040 },
3041 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
3042 },
3043 };
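/*
 * Illustrative caller-side sketch, not driver code: once these
 * templates are registered, an ordinary by-name allocation resolves to
 * the caam implementation whenever its priority (CAAM_CRA_PRIORITY)
 * wins over other providers.
 */
#if 0
	struct crypto_ablkcipher *tfm =
		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
#endif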
3044
3045 static struct caam_aead_alg driver_aeads[] = {
3046 {
3047 .aead = {
3048 .base = {
3049 .cra_name = "rfc4106(gcm(aes))",
3050 .cra_driver_name = "rfc4106-gcm-aes-caam",
3051 .cra_blocksize = 1,
3052 },
3053 .setkey = rfc4106_setkey,
3054 .setauthsize = rfc4106_setauthsize,
3055 .encrypt = ipsec_gcm_encrypt,
3056 .decrypt = ipsec_gcm_decrypt,
3057 .ivsize = 8,
3058 .maxauthsize = AES_BLOCK_SIZE,
3059 },
3060 .caam = {
3061 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3062 },
3063 },
3064 {
3065 .aead = {
3066 .base = {
3067 .cra_name = "rfc4543(gcm(aes))",
3068 .cra_driver_name = "rfc4543-gcm-aes-caam",
3069 .cra_blocksize = 1,
3070 },
3071 .setkey = rfc4543_setkey,
3072 .setauthsize = rfc4543_setauthsize,
3073 .encrypt = ipsec_gcm_encrypt,
3074 .decrypt = ipsec_gcm_decrypt,
3075 .ivsize = 8,
3076 .maxauthsize = AES_BLOCK_SIZE,
3077 },
3078 .caam = {
3079 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3080 },
3081 },
3082 /* Galois Counter Mode */
3083 {
3084 .aead = {
3085 .base = {
3086 .cra_name = "gcm(aes)",
3087 .cra_driver_name = "gcm-aes-caam",
3088 .cra_blocksize = 1,
3089 },
3090 .setkey = gcm_setkey,
3091 .setauthsize = gcm_setauthsize,
3092 .encrypt = gcm_encrypt,
3093 .decrypt = gcm_decrypt,
3094 .ivsize = 12,
3095 .maxauthsize = AES_BLOCK_SIZE,
3096 },
3097 .caam = {
3098 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3099 },
3100 },
3101 /* single-pass ipsec_esp descriptor */
3102 {
3103 .aead = {
3104 .base = {
3105 .cra_name = "authenc(hmac(md5),"
3106 "ecb(cipher_null))",
3107 .cra_driver_name = "authenc-hmac-md5-"
3108 "ecb-cipher_null-caam",
3109 .cra_blocksize = NULL_BLOCK_SIZE,
3110 },
3111 .setkey = aead_setkey,
3112 .setauthsize = aead_setauthsize,
3113 .encrypt = aead_encrypt,
3114 .decrypt = aead_decrypt,
3115 .ivsize = NULL_IV_SIZE,
3116 .maxauthsize = MD5_DIGEST_SIZE,
3117 },
3118 .caam = {
3119 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3120 OP_ALG_AAI_HMAC_PRECOMP,
3121 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3122 },
3123 },
3124 {
3125 .aead = {
3126 .base = {
3127 .cra_name = "authenc(hmac(sha1),"
3128 "ecb(cipher_null))",
3129 .cra_driver_name = "authenc-hmac-sha1-"
3130 "ecb-cipher_null-caam",
3131 .cra_blocksize = NULL_BLOCK_SIZE,
3132 },
3133 .setkey = aead_setkey,
3134 .setauthsize = aead_setauthsize,
3135 .encrypt = aead_encrypt,
3136 .decrypt = aead_decrypt,
3137 .ivsize = NULL_IV_SIZE,
3138 .maxauthsize = SHA1_DIGEST_SIZE,
3139 },
3140 .caam = {
3141 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3142 OP_ALG_AAI_HMAC_PRECOMP,
3143 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3144 },
3145 },
3146 {
3147 .aead = {
3148 .base = {
3149 .cra_name = "authenc(hmac(sha224),"
3150 "ecb(cipher_null))",
3151 .cra_driver_name = "authenc-hmac-sha224-"
3152 "ecb-cipher_null-caam",
3153 .cra_blocksize = NULL_BLOCK_SIZE,
3154 },
3155 .setkey = aead_setkey,
3156 .setauthsize = aead_setauthsize,
3157 .encrypt = aead_encrypt,
3158 .decrypt = aead_decrypt,
3159 .ivsize = NULL_IV_SIZE,
3160 .maxauthsize = SHA224_DIGEST_SIZE,
3161 },
3162 .caam = {
3163 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3164 OP_ALG_AAI_HMAC_PRECOMP,
3165 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3166 },
3167 },
3168 {
3169 .aead = {
3170 .base = {
3171 .cra_name = "authenc(hmac(sha256),"
3172 "ecb(cipher_null))",
3173 .cra_driver_name = "authenc-hmac-sha256-"
3174 "ecb-cipher_null-caam",
3175 .cra_blocksize = NULL_BLOCK_SIZE,
3176 },
3177 .setkey = aead_setkey,
3178 .setauthsize = aead_setauthsize,
3179 .encrypt = aead_encrypt,
3180 .decrypt = aead_decrypt,
3181 .ivsize = NULL_IV_SIZE,
3182 .maxauthsize = SHA256_DIGEST_SIZE,
3183 },
3184 .caam = {
3185 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3186 OP_ALG_AAI_HMAC_PRECOMP,
3187 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3188 },
3189 },
3190 {
3191 .aead = {
3192 .base = {
3193 .cra_name = "authenc(hmac(sha384),"
3194 "ecb(cipher_null))",
3195 .cra_driver_name = "authenc-hmac-sha384-"
3196 "ecb-cipher_null-caam",
3197 .cra_blocksize = NULL_BLOCK_SIZE,
3198 },
3199 .setkey = aead_setkey,
3200 .setauthsize = aead_setauthsize,
3201 .encrypt = aead_encrypt,
3202 .decrypt = aead_decrypt,
3203 .ivsize = NULL_IV_SIZE,
3204 .maxauthsize = SHA384_DIGEST_SIZE,
3205 },
3206 .caam = {
3207 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3208 OP_ALG_AAI_HMAC_PRECOMP,
3209 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3210 },
3211 },
3212 {
3213 .aead = {
3214 .base = {
3215 .cra_name = "authenc(hmac(sha512),"
3216 "ecb(cipher_null))",
3217 .cra_driver_name = "authenc-hmac-sha512-"
3218 "ecb-cipher_null-caam",
3219 .cra_blocksize = NULL_BLOCK_SIZE,
3220 },
3221 .setkey = aead_setkey,
3222 .setauthsize = aead_setauthsize,
3223 .encrypt = aead_encrypt,
3224 .decrypt = aead_decrypt,
3225 .ivsize = NULL_IV_SIZE,
3226 .maxauthsize = SHA512_DIGEST_SIZE,
3227 },
3228 .caam = {
3229 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3230 OP_ALG_AAI_HMAC_PRECOMP,
3231 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3232 },
3233 },
3234 {
3235 .aead = {
3236 .base = {
3237 .cra_name = "authenc(hmac(md5),cbc(aes))",
3238 .cra_driver_name = "authenc-hmac-md5-"
3239 "cbc-aes-caam",
3240 .cra_blocksize = AES_BLOCK_SIZE,
3241 },
3242 .setkey = aead_setkey,
3243 .setauthsize = aead_setauthsize,
3244 .encrypt = aead_encrypt,
3245 .decrypt = aead_decrypt,
3246 .ivsize = AES_BLOCK_SIZE,
3247 .maxauthsize = MD5_DIGEST_SIZE,
3248 },
3249 .caam = {
3250 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3251 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3252 OP_ALG_AAI_HMAC_PRECOMP,
3253 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3254 },
3255 },
3256 {
3257 .aead = {
3258 .base = {
3259 .cra_name = "echainiv(authenc(hmac(md5),"
3260 "cbc(aes)))",
3261 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3262 "cbc-aes-caam",
3263 .cra_blocksize = AES_BLOCK_SIZE,
3264 },
3265 .setkey = aead_setkey,
3266 .setauthsize = aead_setauthsize,
3267 .encrypt = aead_encrypt,
3268 .decrypt = aead_decrypt,
3269 .ivsize = AES_BLOCK_SIZE,
3270 .maxauthsize = MD5_DIGEST_SIZE,
3271 },
3272 .caam = {
3273 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3274 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3275 OP_ALG_AAI_HMAC_PRECOMP,
3276 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3277 .geniv = true,
3278 },
3279 },
3280 {
3281 .aead = {
3282 .base = {
3283 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3284 .cra_driver_name = "authenc-hmac-sha1-"
3285 "cbc-aes-caam",
3286 .cra_blocksize = AES_BLOCK_SIZE,
3287 },
3288 .setkey = aead_setkey,
3289 .setauthsize = aead_setauthsize,
3290 .encrypt = aead_encrypt,
3291 .decrypt = aead_decrypt,
3292 .ivsize = AES_BLOCK_SIZE,
3293 .maxauthsize = SHA1_DIGEST_SIZE,
3294 },
3295 .caam = {
3296 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3297 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3298 OP_ALG_AAI_HMAC_PRECOMP,
3299 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3300 },
3301 },
3302 {
3303 .aead = {
3304 .base = {
3305 .cra_name = "echainiv(authenc(hmac(sha1),"
3306 "cbc(aes)))",
3307 .cra_driver_name = "echainiv-authenc-"
3308 "hmac-sha1-cbc-aes-caam",
3309 .cra_blocksize = AES_BLOCK_SIZE,
3310 },
3311 .setkey = aead_setkey,
3312 .setauthsize = aead_setauthsize,
3313 .encrypt = aead_encrypt,
3314 .decrypt = aead_decrypt,
3315 .ivsize = AES_BLOCK_SIZE,
3316 .maxauthsize = SHA1_DIGEST_SIZE,
3317 },
3318 .caam = {
3319 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3320 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3321 OP_ALG_AAI_HMAC_PRECOMP,
3322 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3323 .geniv = true,
3324 },
3325 },
3326 {
3327 .aead = {
3328 .base = {
3329 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3330 .cra_driver_name = "authenc-hmac-sha224-"
3331 "cbc-aes-caam",
3332 .cra_blocksize = AES_BLOCK_SIZE,
3333 },
3334 .setkey = aead_setkey,
3335 .setauthsize = aead_setauthsize,
3336 .encrypt = aead_encrypt,
3337 .decrypt = aead_decrypt,
3338 .ivsize = AES_BLOCK_SIZE,
3339 .maxauthsize = SHA224_DIGEST_SIZE,
3340 },
3341 .caam = {
3342 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3343 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3344 OP_ALG_AAI_HMAC_PRECOMP,
3345 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3346 },
3347 },
3348 {
3349 .aead = {
3350 .base = {
3351 .cra_name = "echainiv(authenc(hmac(sha224),"
3352 "cbc(aes)))",
3353 .cra_driver_name = "echainiv-authenc-"
3354 "hmac-sha224-cbc-aes-caam",
3355 .cra_blocksize = AES_BLOCK_SIZE,
3356 },
3357 .setkey = aead_setkey,
3358 .setauthsize = aead_setauthsize,
3359 .encrypt = aead_encrypt,
3360 .decrypt = aead_decrypt,
3361 .ivsize = AES_BLOCK_SIZE,
3362 .maxauthsize = SHA224_DIGEST_SIZE,
3363 },
3364 .caam = {
3365 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3366 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3367 OP_ALG_AAI_HMAC_PRECOMP,
3368 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3369 .geniv = true,
3370 },
3371 },
3372 {
3373 .aead = {
3374 .base = {
3375 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3376 .cra_driver_name = "authenc-hmac-sha256-"
3377 "cbc-aes-caam",
3378 .cra_blocksize = AES_BLOCK_SIZE,
3379 },
3380 .setkey = aead_setkey,
3381 .setauthsize = aead_setauthsize,
3382 .encrypt = aead_encrypt,
3383 .decrypt = aead_decrypt,
3384 .ivsize = AES_BLOCK_SIZE,
3385 .maxauthsize = SHA256_DIGEST_SIZE,
3386 },
3387 .caam = {
3388 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3389 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3390 OP_ALG_AAI_HMAC_PRECOMP,
3391 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3392 },
3393 },
3394 {
3395 .aead = {
3396 .base = {
3397 .cra_name = "echainiv(authenc(hmac(sha256),"
3398 "cbc(aes)))",
3399 .cra_driver_name = "echainiv-authenc-"
3400 "hmac-sha256-cbc-aes-caam",
3401 .cra_blocksize = AES_BLOCK_SIZE,
3402 },
3403 .setkey = aead_setkey,
3404 .setauthsize = aead_setauthsize,
3405 .encrypt = aead_encrypt,
3406 .decrypt = aead_decrypt,
3407 .ivsize = AES_BLOCK_SIZE,
3408 .maxauthsize = SHA256_DIGEST_SIZE,
3409 },
3410 .caam = {
3411 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3412 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3413 OP_ALG_AAI_HMAC_PRECOMP,
3414 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3415 .geniv = true,
3416 },
3417 },
3418 {
3419 .aead = {
3420 .base = {
3421 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3422 .cra_driver_name = "authenc-hmac-sha384-"
3423 "cbc-aes-caam",
3424 .cra_blocksize = AES_BLOCK_SIZE,
3425 },
3426 .setkey = aead_setkey,
3427 .setauthsize = aead_setauthsize,
3428 .encrypt = aead_encrypt,
3429 .decrypt = aead_decrypt,
3430 .ivsize = AES_BLOCK_SIZE,
3431 .maxauthsize = SHA384_DIGEST_SIZE,
3432 },
3433 .caam = {
3434 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3435 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3436 OP_ALG_AAI_HMAC_PRECOMP,
3437 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3438 },
3439 },
3440 {
3441 .aead = {
3442 .base = {
3443 .cra_name = "echainiv(authenc(hmac(sha384),"
3444 "cbc(aes)))",
3445 .cra_driver_name = "echainiv-authenc-"
3446 "hmac-sha384-cbc-aes-caam",
3447 .cra_blocksize = AES_BLOCK_SIZE,
3448 },
3449 .setkey = aead_setkey,
3450 .setauthsize = aead_setauthsize,
3451 .encrypt = aead_encrypt,
3452 .decrypt = aead_decrypt,
3453 .ivsize = AES_BLOCK_SIZE,
3454 .maxauthsize = SHA384_DIGEST_SIZE,
3455 },
3456 .caam = {
3457 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3458 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3459 OP_ALG_AAI_HMAC_PRECOMP,
3460 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3461 .geniv = true,
3462 },
3463 },
3464 {
3465 .aead = {
3466 .base = {
3467 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3468 .cra_driver_name = "authenc-hmac-sha512-"
3469 "cbc-aes-caam",
3470 .cra_blocksize = AES_BLOCK_SIZE,
3471 },
3472 .setkey = aead_setkey,
3473 .setauthsize = aead_setauthsize,
3474 .encrypt = aead_encrypt,
3475 .decrypt = aead_decrypt,
3476 .ivsize = AES_BLOCK_SIZE,
3477 .maxauthsize = SHA512_DIGEST_SIZE,
3478 },
3479 .caam = {
3480 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3481 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3482 OP_ALG_AAI_HMAC_PRECOMP,
3483 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3484 },
3485 },
3486 {
3487 .aead = {
3488 .base = {
3489 .cra_name = "echainiv(authenc(hmac(sha512),"
3490 "cbc(aes)))",
3491 .cra_driver_name = "echainiv-authenc-"
3492 "hmac-sha512-cbc-aes-caam",
3493 .cra_blocksize = AES_BLOCK_SIZE,
3494 },
3495 .setkey = aead_setkey,
3496 .setauthsize = aead_setauthsize,
3497 .encrypt = aead_encrypt,
3498 .decrypt = aead_decrypt,
3499 .ivsize = AES_BLOCK_SIZE,
3500 .maxauthsize = SHA512_DIGEST_SIZE,
3501 },
3502 .caam = {
3503 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3504 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3505 OP_ALG_AAI_HMAC_PRECOMP,
3506 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3507 .geniv = true,
3508 },
3509 },
3510 {
3511 .aead = {
3512 .base = {
3513 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3514 .cra_driver_name = "authenc-hmac-md5-"
3515 "cbc-des3_ede-caam",
3516 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3517 },
3518 .setkey = aead_setkey,
3519 .setauthsize = aead_setauthsize,
3520 .encrypt = aead_encrypt,
3521 .decrypt = aead_decrypt,
3522 .ivsize = DES3_EDE_BLOCK_SIZE,
3523 .maxauthsize = MD5_DIGEST_SIZE,
3524 },
3525 .caam = {
3526 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3527 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3528 OP_ALG_AAI_HMAC_PRECOMP,
3529 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3530 }
3531 },
3532 {
3533 .aead = {
3534 .base = {
3535 .cra_name = "echainiv(authenc(hmac(md5),"
3536 "cbc(des3_ede)))",
3537 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3538 "cbc-des3_ede-caam",
3539 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3540 },
3541 .setkey = aead_setkey,
3542 .setauthsize = aead_setauthsize,
3543 .encrypt = aead_encrypt,
3544 .decrypt = aead_decrypt,
3545 .ivsize = DES3_EDE_BLOCK_SIZE,
3546 .maxauthsize = MD5_DIGEST_SIZE,
3547 },
3548 .caam = {
3549 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3550 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3551 OP_ALG_AAI_HMAC_PRECOMP,
3552 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3553 .geniv = true,
3554 },
3555 },
3556 {
3557 .aead = {
3558 .base = {
3559 .cra_name = "authenc(hmac(sha1),"
3560 "cbc(des3_ede))",
3561 .cra_driver_name = "authenc-hmac-sha1-"
3562 "cbc-des3_ede-caam",
3563 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3564 },
3565 .setkey = aead_setkey,
3566 .setauthsize = aead_setauthsize,
3567 .encrypt = aead_encrypt,
3568 .decrypt = aead_decrypt,
3569 .ivsize = DES3_EDE_BLOCK_SIZE,
3570 .maxauthsize = SHA1_DIGEST_SIZE,
3571 },
3572 .caam = {
3573 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3574 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3575 OP_ALG_AAI_HMAC_PRECOMP,
3576 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3577 },
3578 },
3579 {
3580 .aead = {
3581 .base = {
3582 .cra_name = "echainiv(authenc(hmac(sha1),"
3583 "cbc(des3_ede)))",
3584 .cra_driver_name = "echainiv-authenc-"
3585 "hmac-sha1-"
3586 "cbc-des3_ede-caam",
3587 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3588 },
3589 .setkey = aead_setkey,
3590 .setauthsize = aead_setauthsize,
3591 .encrypt = aead_encrypt,
3592 .decrypt = aead_decrypt,
3593 .ivsize = DES3_EDE_BLOCK_SIZE,
3594 .maxauthsize = SHA1_DIGEST_SIZE,
3595 },
3596 .caam = {
3597 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3598 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3599 OP_ALG_AAI_HMAC_PRECOMP,
3600 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3601 .geniv = true,
3602 },
3603 },
3604 {
3605 .aead = {
3606 .base = {
3607 .cra_name = "authenc(hmac(sha224),"
3608 "cbc(des3_ede))",
3609 .cra_driver_name = "authenc-hmac-sha224-"
3610 "cbc-des3_ede-caam",
3611 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3612 },
3613 .setkey = aead_setkey,
3614 .setauthsize = aead_setauthsize,
3615 .encrypt = aead_encrypt,
3616 .decrypt = aead_decrypt,
3617 .ivsize = DES3_EDE_BLOCK_SIZE,
3618 .maxauthsize = SHA224_DIGEST_SIZE,
3619 },
3620 .caam = {
3621 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3622 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3623 OP_ALG_AAI_HMAC_PRECOMP,
3624 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3625 },
3626 },
3627 {
3628 .aead = {
3629 .base = {
3630 .cra_name = "echainiv(authenc(hmac(sha224),"
3631 "cbc(des3_ede)))",
3632 .cra_driver_name = "echainiv-authenc-"
3633 "hmac-sha224-"
3634 "cbc-des3_ede-caam",
3635 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3636 },
3637 .setkey = aead_setkey,
3638 .setauthsize = aead_setauthsize,
3639 .encrypt = aead_encrypt,
3640 .decrypt = aead_decrypt,
3641 .ivsize = DES3_EDE_BLOCK_SIZE,
3642 .maxauthsize = SHA224_DIGEST_SIZE,
3643 },
3644 .caam = {
3645 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3646 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3647 OP_ALG_AAI_HMAC_PRECOMP,
3648 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3649 .geniv = true,
3650 },
3651 },
3652 {
3653 .aead = {
3654 .base = {
3655 .cra_name = "authenc(hmac(sha256),"
3656 "cbc(des3_ede))",
3657 .cra_driver_name = "authenc-hmac-sha256-"
3658 "cbc-des3_ede-caam",
3659 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3660 },
3661 .setkey = aead_setkey,
3662 .setauthsize = aead_setauthsize,
3663 .encrypt = aead_encrypt,
3664 .decrypt = aead_decrypt,
3665 .ivsize = DES3_EDE_BLOCK_SIZE,
3666 .maxauthsize = SHA256_DIGEST_SIZE,
3667 },
3668 .caam = {
3669 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3670 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3671 OP_ALG_AAI_HMAC_PRECOMP,
3672 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3673 },
3674 },
3675 {
3676 .aead = {
3677 .base = {
3678 .cra_name = "echainiv(authenc(hmac(sha256),"
3679 "cbc(des3_ede)))",
3680 .cra_driver_name = "echainiv-authenc-"
3681 "hmac-sha256-"
3682 "cbc-des3_ede-caam",
3683 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3684 },
3685 .setkey = aead_setkey,
3686 .setauthsize = aead_setauthsize,
3687 .encrypt = aead_encrypt,
3688 .decrypt = aead_decrypt,
3689 .ivsize = DES3_EDE_BLOCK_SIZE,
3690 .maxauthsize = SHA256_DIGEST_SIZE,
3691 },
3692 .caam = {
3693 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3694 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3695 OP_ALG_AAI_HMAC_PRECOMP,
3696 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3697 .geniv = true,
3698 },
3699 },
3700 {
3701 .aead = {
3702 .base = {
3703 .cra_name = "authenc(hmac(sha384),"
3704 "cbc(des3_ede))",
3705 .cra_driver_name = "authenc-hmac-sha384-"
3706 "cbc-des3_ede-caam",
3707 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3708 },
3709 .setkey = aead_setkey,
3710 .setauthsize = aead_setauthsize,
3711 .encrypt = aead_encrypt,
3712 .decrypt = aead_decrypt,
3713 .ivsize = DES3_EDE_BLOCK_SIZE,
3714 .maxauthsize = SHA384_DIGEST_SIZE,
3715 },
3716 .caam = {
3717 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3718 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3719 OP_ALG_AAI_HMAC_PRECOMP,
3720 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3721 },
3722 },
3723 {
3724 .aead = {
3725 .base = {
3726 .cra_name = "echainiv(authenc(hmac(sha384),"
3727 "cbc(des3_ede)))",
3728 .cra_driver_name = "echainiv-authenc-"
3729 "hmac-sha384-"
3730 "cbc-des3_ede-caam",
3731 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3732 },
3733 .setkey = aead_setkey,
3734 .setauthsize = aead_setauthsize,
3735 .encrypt = aead_encrypt,
3736 .decrypt = aead_decrypt,
3737 .ivsize = DES3_EDE_BLOCK_SIZE,
3738 .maxauthsize = SHA384_DIGEST_SIZE,
3739 },
3740 .caam = {
3741 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3742 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3743 OP_ALG_AAI_HMAC_PRECOMP,
3744 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3745 .geniv = true,
3746 },
3747 },
3748 {
3749 .aead = {
3750 .base = {
3751 .cra_name = "authenc(hmac(sha512),"
3752 "cbc(des3_ede))",
3753 .cra_driver_name = "authenc-hmac-sha512-"
3754 "cbc-des3_ede-caam",
3755 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3756 },
3757 .setkey = aead_setkey,
3758 .setauthsize = aead_setauthsize,
3759 .encrypt = aead_encrypt,
3760 .decrypt = aead_decrypt,
3761 .ivsize = DES3_EDE_BLOCK_SIZE,
3762 .maxauthsize = SHA512_DIGEST_SIZE,
3763 },
3764 .caam = {
3765 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3766 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3767 OP_ALG_AAI_HMAC_PRECOMP,
3768 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3769 },
3770 },
3771 {
3772 .aead = {
3773 .base = {
3774 .cra_name = "echainiv(authenc(hmac(sha512),"
3775 "cbc(des3_ede)))",
3776 .cra_driver_name = "echainiv-authenc-"
3777 "hmac-sha512-"
3778 "cbc-des3_ede-caam",
3779 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3780 },
3781 .setkey = aead_setkey,
3782 .setauthsize = aead_setauthsize,
3783 .encrypt = aead_encrypt,
3784 .decrypt = aead_decrypt,
3785 .ivsize = DES3_EDE_BLOCK_SIZE,
3786 .maxauthsize = SHA512_DIGEST_SIZE,
3787 },
3788 .caam = {
3789 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3790 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3791 OP_ALG_AAI_HMAC_PRECOMP,
3792 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3793 .geniv = true,
3794 },
3795 },
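/* Single-DES CBC variants, with a DES block-sized IV. */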
3796 {
3797 .aead = {
3798 .base = {
3799 .cra_name = "authenc(hmac(md5),cbc(des))",
3800 .cra_driver_name = "authenc-hmac-md5-"
3801 "cbc-des-caam",
3802 .cra_blocksize = DES_BLOCK_SIZE,
3803 },
3804 .setkey = aead_setkey,
3805 .setauthsize = aead_setauthsize,
3806 .encrypt = aead_encrypt,
3807 .decrypt = aead_decrypt,
3808 .ivsize = DES_BLOCK_SIZE,
3809 .maxauthsize = MD5_DIGEST_SIZE,
3810 },
3811 .caam = {
3812 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3813 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3814 OP_ALG_AAI_HMAC_PRECOMP,
3815 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3816 },
3817 },
3818 {
3819 .aead = {
3820 .base = {
3821 .cra_name = "echainiv(authenc(hmac(md5),"
3822 "cbc(des)))",
3823 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3824 "cbc-des-caam",
3825 .cra_blocksize = DES_BLOCK_SIZE,
3826 },
3827 .setkey = aead_setkey,
3828 .setauthsize = aead_setauthsize,
3829 .encrypt = aead_encrypt,
3830 .decrypt = aead_decrypt,
3831 .ivsize = DES_BLOCK_SIZE,
3832 .maxauthsize = MD5_DIGEST_SIZE,
3833 },
3834 .caam = {
3835 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3836 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3837 OP_ALG_AAI_HMAC_PRECOMP,
3838 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3839 .geniv = true,
3840 },
3841 },
3842 {
3843 .aead = {
3844 .base = {
3845 .cra_name = "authenc(hmac(sha1),cbc(des))",
3846 .cra_driver_name = "authenc-hmac-sha1-"
3847 "cbc-des-caam",
3848 .cra_blocksize = DES_BLOCK_SIZE,
3849 },
3850 .setkey = aead_setkey,
3851 .setauthsize = aead_setauthsize,
3852 .encrypt = aead_encrypt,
3853 .decrypt = aead_decrypt,
3854 .ivsize = DES_BLOCK_SIZE,
3855 .maxauthsize = SHA1_DIGEST_SIZE,
3856 },
3857 .caam = {
3858 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3859 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3860 OP_ALG_AAI_HMAC_PRECOMP,
3861 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3862 },
3863 },
3864 {
3865 .aead = {
3866 .base = {
3867 .cra_name = "echainiv(authenc(hmac(sha1),"
3868 "cbc(des)))",
3869 .cra_driver_name = "echainiv-authenc-"
3870 "hmac-sha1-cbc-des-caam",
3871 .cra_blocksize = DES_BLOCK_SIZE,
3872 },
3873 .setkey = aead_setkey,
3874 .setauthsize = aead_setauthsize,
3875 .encrypt = aead_encrypt,
3876 .decrypt = aead_decrypt,
3877 .ivsize = DES_BLOCK_SIZE,
3878 .maxauthsize = SHA1_DIGEST_SIZE,
3879 },
3880 .caam = {
3881 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3882 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3883 OP_ALG_AAI_HMAC_PRECOMP,
3884 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3885 .geniv = true,
3886 },
3887 },
3888 {
3889 .aead = {
3890 .base = {
3891 .cra_name = "authenc(hmac(sha224),cbc(des))",
3892 .cra_driver_name = "authenc-hmac-sha224-"
3893 "cbc-des-caam",
3894 .cra_blocksize = DES_BLOCK_SIZE,
3895 },
3896 .setkey = aead_setkey,
3897 .setauthsize = aead_setauthsize,
3898 .encrypt = aead_encrypt,
3899 .decrypt = aead_decrypt,
3900 .ivsize = DES_BLOCK_SIZE,
3901 .maxauthsize = SHA224_DIGEST_SIZE,
3902 },
3903 .caam = {
3904 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3905 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3906 OP_ALG_AAI_HMAC_PRECOMP,
3907 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3908 },
3909 },
3910 {
3911 .aead = {
3912 .base = {
3913 .cra_name = "echainiv(authenc(hmac(sha224),"
3914 "cbc(des)))",
3915 .cra_driver_name = "echainiv-authenc-"
3916 "hmac-sha224-cbc-des-caam",
3917 .cra_blocksize = DES_BLOCK_SIZE,
3918 },
3919 .setkey = aead_setkey,
3920 .setauthsize = aead_setauthsize,
3921 .encrypt = aead_encrypt,
3922 .decrypt = aead_decrypt,
3923 .ivsize = DES_BLOCK_SIZE,
3924 .maxauthsize = SHA224_DIGEST_SIZE,
3925 },
3926 .caam = {
3927 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3928 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3929 OP_ALG_AAI_HMAC_PRECOMP,
3930 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3931 .geniv = true,
3932 },
3933 },
3934 {
3935 .aead = {
3936 .base = {
3937 .cra_name = "authenc(hmac(sha256),cbc(des))",
3938 .cra_driver_name = "authenc-hmac-sha256-"
3939 "cbc-des-caam",
3940 .cra_blocksize = DES_BLOCK_SIZE,
3941 },
3942 .setkey = aead_setkey,
3943 .setauthsize = aead_setauthsize,
3944 .encrypt = aead_encrypt,
3945 .decrypt = aead_decrypt,
3946 .ivsize = DES_BLOCK_SIZE,
3947 .maxauthsize = SHA256_DIGEST_SIZE,
3948 },
3949 .caam = {
3950 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3951 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3952 OP_ALG_AAI_HMAC_PRECOMP,
3953 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3954 },
3955 },
3956 {
3957 .aead = {
3958 .base = {
3959 .cra_name = "echainiv(authenc(hmac(sha256),"
3960 "cbc(des)))",
3961 .cra_driver_name = "echainiv-authenc-"
3962 "hmac-sha256-cbc-des-caam",
3963 .cra_blocksize = DES_BLOCK_SIZE,
3964 },
3965 .setkey = aead_setkey,
3966 .setauthsize = aead_setauthsize,
3967 .encrypt = aead_encrypt,
3968 .decrypt = aead_decrypt,
3969 .ivsize = DES_BLOCK_SIZE,
3970 .maxauthsize = SHA256_DIGEST_SIZE,
3971 },
3972 .caam = {
3973 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3974 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3975 OP_ALG_AAI_HMAC_PRECOMP,
3976 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3977 .geniv = true,
3978 },
3979 },
3980 {
3981 .aead = {
3982 .base = {
3983 .cra_name = "authenc(hmac(sha384),cbc(des))",
3984 .cra_driver_name = "authenc-hmac-sha384-"
3985 "cbc-des-caam",
3986 .cra_blocksize = DES_BLOCK_SIZE,
3987 },
3988 .setkey = aead_setkey,
3989 .setauthsize = aead_setauthsize,
3990 .encrypt = aead_encrypt,
3991 .decrypt = aead_decrypt,
3992 .ivsize = DES_BLOCK_SIZE,
3993 .maxauthsize = SHA384_DIGEST_SIZE,
3994 },
3995 .caam = {
3996 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3997 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3998 OP_ALG_AAI_HMAC_PRECOMP,
3999 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4000 },
4001 },
4002 {
4003 .aead = {
4004 .base = {
4005 .cra_name = "echainiv(authenc(hmac(sha384),"
4006 "cbc(des)))",
4007 .cra_driver_name = "echainiv-authenc-"
4008 "hmac-sha384-cbc-des-caam",
4009 .cra_blocksize = DES_BLOCK_SIZE,
4010 },
4011 .setkey = aead_setkey,
4012 .setauthsize = aead_setauthsize,
4013 .encrypt = aead_encrypt,
4014 .decrypt = aead_decrypt,
4015 .ivsize = DES_BLOCK_SIZE,
4016 .maxauthsize = SHA384_DIGEST_SIZE,
4017 },
4018 .caam = {
4019 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4020 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4021 OP_ALG_AAI_HMAC_PRECOMP,
4022 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4023 .geniv = true,
4024 },
4025 },
4026 {
4027 .aead = {
4028 .base = {
4029 .cra_name = "authenc(hmac(sha512),cbc(des))",
4030 .cra_driver_name = "authenc-hmac-sha512-"
4031 "cbc-des-caam",
4032 .cra_blocksize = DES_BLOCK_SIZE,
4033 },
4034 .setkey = aead_setkey,
4035 .setauthsize = aead_setauthsize,
4036 .encrypt = aead_encrypt,
4037 .decrypt = aead_decrypt,
4038 .ivsize = DES_BLOCK_SIZE,
4039 .maxauthsize = SHA512_DIGEST_SIZE,
4040 },
4041 .caam = {
4042 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4043 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4044 OP_ALG_AAI_HMAC_PRECOMP,
4045 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4046 },
4047 },
4048 {
4049 .aead = {
4050 .base = {
4051 .cra_name = "echainiv(authenc(hmac(sha512),"
4052 "cbc(des)))",
4053 .cra_driver_name = "echainiv-authenc-"
4054 "hmac-sha512-cbc-des-caam",
4055 .cra_blocksize = DES_BLOCK_SIZE,
4056 },
4057 .setkey = aead_setkey,
4058 .setauthsize = aead_setauthsize,
4059 .encrypt = aead_encrypt,
4060 .decrypt = aead_decrypt,
4061 .ivsize = DES_BLOCK_SIZE,
4062 .maxauthsize = SHA512_DIGEST_SIZE,
4063 },
4064 .caam = {
4065 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4066 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4067 OP_ALG_AAI_HMAC_PRECOMP,
4068 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4069 .geniv = true,
4070 },
4071 },
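/*
 * RFC3686 CTR-AES variants: CTR is a stream cipher, hence
 * cra_blocksize = 1, and IV generation uses seqiv rather than
 * echainiv. The per-key nonce required by RFC3686 is carried at the
 * end of the encryption key (rfc3686 = true).
 */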
4072 {
4073 .aead = {
4074 .base = {
4075 .cra_name = "authenc(hmac(md5),"
4076 "rfc3686(ctr(aes)))",
4077 .cra_driver_name = "authenc-hmac-md5-"
4078 "rfc3686-ctr-aes-caam",
4079 .cra_blocksize = 1,
4080 },
4081 .setkey = aead_setkey,
4082 .setauthsize = aead_setauthsize,
4083 .encrypt = aead_encrypt,
4084 .decrypt = aead_decrypt,
4085 .ivsize = CTR_RFC3686_IV_SIZE,
4086 .maxauthsize = MD5_DIGEST_SIZE,
4087 },
4088 .caam = {
4089 .class1_alg_type = OP_ALG_ALGSEL_AES |
4090 OP_ALG_AAI_CTR_MOD128,
4091 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4092 OP_ALG_AAI_HMAC_PRECOMP,
4093 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4094 .rfc3686 = true,
4095 },
4096 },
4097 {
4098 .aead = {
4099 .base = {
4100 .cra_name = "seqiv(authenc("
4101 "hmac(md5),rfc3686(ctr(aes))))",
4102 .cra_driver_name = "seqiv-authenc-hmac-md5-"
4103 "rfc3686-ctr-aes-caam",
4104 .cra_blocksize = 1,
4105 },
4106 .setkey = aead_setkey,
4107 .setauthsize = aead_setauthsize,
4108 .encrypt = aead_encrypt,
4109 .decrypt = aead_decrypt,
4110 .ivsize = CTR_RFC3686_IV_SIZE,
4111 .maxauthsize = MD5_DIGEST_SIZE,
4112 },
4113 .caam = {
4114 .class1_alg_type = OP_ALG_ALGSEL_AES |
4115 OP_ALG_AAI_CTR_MOD128,
4116 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
4117 OP_ALG_AAI_HMAC_PRECOMP,
4118 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4119 .rfc3686 = true,
4120 .geniv = true,
4121 },
4122 },
4123 {
4124 .aead = {
4125 .base = {
4126 .cra_name = "authenc(hmac(sha1),"
4127 "rfc3686(ctr(aes)))",
4128 .cra_driver_name = "authenc-hmac-sha1-"
4129 "rfc3686-ctr-aes-caam",
4130 .cra_blocksize = 1,
4131 },
4132 .setkey = aead_setkey,
4133 .setauthsize = aead_setauthsize,
4134 .encrypt = aead_encrypt,
4135 .decrypt = aead_decrypt,
4136 .ivsize = CTR_RFC3686_IV_SIZE,
4137 .maxauthsize = SHA1_DIGEST_SIZE,
4138 },
4139 .caam = {
4140 .class1_alg_type = OP_ALG_ALGSEL_AES |
4141 OP_ALG_AAI_CTR_MOD128,
4142 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4143 OP_ALG_AAI_HMAC_PRECOMP,
4144 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4145 .rfc3686 = true,
4146 },
4147 },
4148 {
4149 .aead = {
4150 .base = {
4151 .cra_name = "seqiv(authenc("
4152 "hmac(sha1),rfc3686(ctr(aes))))",
4153 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
4154 "rfc3686-ctr-aes-caam",
4155 .cra_blocksize = 1,
4156 },
4157 .setkey = aead_setkey,
4158 .setauthsize = aead_setauthsize,
4159 .encrypt = aead_encrypt,
4160 .decrypt = aead_decrypt,
4161 .ivsize = CTR_RFC3686_IV_SIZE,
4162 .maxauthsize = SHA1_DIGEST_SIZE,
4163 },
4164 .caam = {
4165 .class1_alg_type = OP_ALG_ALGSEL_AES |
4166 OP_ALG_AAI_CTR_MOD128,
4167 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4168 OP_ALG_AAI_HMAC_PRECOMP,
4169 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4170 .rfc3686 = true,
4171 .geniv = true,
4172 },
4173 },
4174 {
4175 .aead = {
4176 .base = {
4177 .cra_name = "authenc(hmac(sha224),"
4178 "rfc3686(ctr(aes)))",
4179 .cra_driver_name = "authenc-hmac-sha224-"
4180 "rfc3686-ctr-aes-caam",
4181 .cra_blocksize = 1,
4182 },
4183 .setkey = aead_setkey,
4184 .setauthsize = aead_setauthsize,
4185 .encrypt = aead_encrypt,
4186 .decrypt = aead_decrypt,
4187 .ivsize = CTR_RFC3686_IV_SIZE,
4188 .maxauthsize = SHA224_DIGEST_SIZE,
4189 },
4190 .caam = {
4191 .class1_alg_type = OP_ALG_ALGSEL_AES |
4192 OP_ALG_AAI_CTR_MOD128,
4193 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4194 OP_ALG_AAI_HMAC_PRECOMP,
4195 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4196 .rfc3686 = true,
4197 },
4198 },
4199 {
4200 .aead = {
4201 .base = {
4202 .cra_name = "seqiv(authenc("
4203 "hmac(sha224),rfc3686(ctr(aes))))",
4204 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
4205 "rfc3686-ctr-aes-caam",
4206 .cra_blocksize = 1,
4207 },
4208 .setkey = aead_setkey,
4209 .setauthsize = aead_setauthsize,
4210 .encrypt = aead_encrypt,
4211 .decrypt = aead_decrypt,
4212 .ivsize = CTR_RFC3686_IV_SIZE,
4213 .maxauthsize = SHA224_DIGEST_SIZE,
4214 },
4215 .caam = {
4216 .class1_alg_type = OP_ALG_ALGSEL_AES |
4217 OP_ALG_AAI_CTR_MOD128,
4218 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4219 OP_ALG_AAI_HMAC_PRECOMP,
4220 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4221 .rfc3686 = true,
4222 .geniv = true,
4223 },
4224 },
4225 {
4226 .aead = {
4227 .base = {
4228 .cra_name = "authenc(hmac(sha256),"
4229 "rfc3686(ctr(aes)))",
4230 .cra_driver_name = "authenc-hmac-sha256-"
4231 "rfc3686-ctr-aes-caam",
4232 .cra_blocksize = 1,
4233 },
4234 .setkey = aead_setkey,
4235 .setauthsize = aead_setauthsize,
4236 .encrypt = aead_encrypt,
4237 .decrypt = aead_decrypt,
4238 .ivsize = CTR_RFC3686_IV_SIZE,
4239 .maxauthsize = SHA256_DIGEST_SIZE,
4240 },
4241 .caam = {
4242 .class1_alg_type = OP_ALG_ALGSEL_AES |
4243 OP_ALG_AAI_CTR_MOD128,
4244 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4245 OP_ALG_AAI_HMAC_PRECOMP,
4246 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4247 .rfc3686 = true,
4248 },
4249 },
4250 {
4251 .aead = {
4252 .base = {
4253 .cra_name = "seqiv(authenc(hmac(sha256),"
4254 "rfc3686(ctr(aes))))",
4255 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
4256 "rfc3686-ctr-aes-caam",
4257 .cra_blocksize = 1,
4258 },
4259 .setkey = aead_setkey,
4260 .setauthsize = aead_setauthsize,
4261 .encrypt = aead_encrypt,
4262 .decrypt = aead_decrypt,
4263 .ivsize = CTR_RFC3686_IV_SIZE,
4264 .maxauthsize = SHA256_DIGEST_SIZE,
4265 },
4266 .caam = {
4267 .class1_alg_type = OP_ALG_ALGSEL_AES |
4268 OP_ALG_AAI_CTR_MOD128,
4269 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4270 OP_ALG_AAI_HMAC_PRECOMP,
4271 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4272 .rfc3686 = true,
4273 .geniv = true,
4274 },
4275 },
4276 {
4277 .aead = {
4278 .base = {
4279 .cra_name = "authenc(hmac(sha384),"
4280 "rfc3686(ctr(aes)))",
4281 .cra_driver_name = "authenc-hmac-sha384-"
4282 "rfc3686-ctr-aes-caam",
4283 .cra_blocksize = 1,
4284 },
4285 .setkey = aead_setkey,
4286 .setauthsize = aead_setauthsize,
4287 .encrypt = aead_encrypt,
4288 .decrypt = aead_decrypt,
4289 .ivsize = CTR_RFC3686_IV_SIZE,
4290 .maxauthsize = SHA384_DIGEST_SIZE,
4291 },
4292 .caam = {
4293 .class1_alg_type = OP_ALG_ALGSEL_AES |
4294 OP_ALG_AAI_CTR_MOD128,
4295 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4296 OP_ALG_AAI_HMAC_PRECOMP,
4297 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4298 .rfc3686 = true,
4299 },
4300 },
4301 {
4302 .aead = {
4303 .base = {
4304 .cra_name = "seqiv(authenc(hmac(sha384),"
4305 "rfc3686(ctr(aes))))",
4306 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
4307 "rfc3686-ctr-aes-caam",
4308 .cra_blocksize = 1,
4309 },
4310 .setkey = aead_setkey,
4311 .setauthsize = aead_setauthsize,
4312 .encrypt = aead_encrypt,
4313 .decrypt = aead_decrypt,
4314 .ivsize = CTR_RFC3686_IV_SIZE,
4315 .maxauthsize = SHA384_DIGEST_SIZE,
4316 },
4317 .caam = {
4318 .class1_alg_type = OP_ALG_ALGSEL_AES |
4319 OP_ALG_AAI_CTR_MOD128,
4320 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4321 OP_ALG_AAI_HMAC_PRECOMP,
4322 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4323 .rfc3686 = true,
4324 .geniv = true,
4325 },
4326 },
4327 {
4328 .aead = {
4329 .base = {
4330 .cra_name = "authenc(hmac(sha512),"
4331 "rfc3686(ctr(aes)))",
4332 .cra_driver_name = "authenc-hmac-sha512-"
4333 "rfc3686-ctr-aes-caam",
4334 .cra_blocksize = 1,
4335 },
4336 .setkey = aead_setkey,
4337 .setauthsize = aead_setauthsize,
4338 .encrypt = aead_encrypt,
4339 .decrypt = aead_decrypt,
4340 .ivsize = CTR_RFC3686_IV_SIZE,
4341 .maxauthsize = SHA512_DIGEST_SIZE,
4342 },
4343 .caam = {
4344 .class1_alg_type = OP_ALG_ALGSEL_AES |
4345 OP_ALG_AAI_CTR_MOD128,
4346 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4347 OP_ALG_AAI_HMAC_PRECOMP,
4348 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4349 .rfc3686 = true,
4350 },
4351 },
4352 {
4353 .aead = {
4354 .base = {
4355 .cra_name = "seqiv(authenc(hmac(sha512),"
4356 "rfc3686(ctr(aes))))",
4357 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
4358 "rfc3686-ctr-aes-caam",
4359 .cra_blocksize = 1,
4360 },
4361 .setkey = aead_setkey,
4362 .setauthsize = aead_setauthsize,
4363 .encrypt = aead_encrypt,
4364 .decrypt = aead_decrypt,
4365 .ivsize = CTR_RFC3686_IV_SIZE,
4366 .maxauthsize = SHA512_DIGEST_SIZE,
4367 },
4368 .caam = {
4369 .class1_alg_type = OP_ALG_ALGSEL_AES |
4370 OP_ALG_AAI_CTR_MOD128,
4371 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4372 OP_ALG_AAI_HMAC_PRECOMP,
4373 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4374 .rfc3686 = true,
4375 .geniv = true,
4376 },
4377 },
4378 };
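/*
 * A minimal sketch (not part of this driver) of how a kernel consumer
 * would reach one of the table entries above through the generic AEAD
 * API; key/keylen are assumed to be caller-provided and packed in the
 * authenc key format (see crypto_authenc_extractkeys()):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * CAAM_CRA_PRIORITY (3000) makes these entries preferred over generic
 * software implementations of the same cra_name.
 */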
4379
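/*
 * Dynamically allocated wrapper for the ablkcipher/givcipher
 * templates: the crypto_alg registered with the crypto core, the
 * alg_list linkage used for unregistration, and the CAAM OP_ALG
 * selector values.
 */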
4380 struct caam_crypto_alg {
4381 struct crypto_alg crypto_alg;
4382 struct list_head entry;
4383 struct caam_alg_entry caam;
4384 };
4385
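/*
 * Init path shared by the ablkcipher and AEAD tfms: allocate a job
 * ring for this transform and seed the descriptor header templates
 * from the algorithm entry; the shared descriptors themselves are
 * built later, by the setkey handlers.
 */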
4386 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4387 {
4388 ctx->jrdev = caam_jr_alloc();
4389 if (IS_ERR(ctx->jrdev)) {
4390 pr_err("Job Ring Device allocation for transform failed\n");
4391 return PTR_ERR(ctx->jrdev);
4392 }
4393
4394 /* copy descriptor header template value */
4395 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4396 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4397 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4398
4399 return 0;
4400 }
4401
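/* crypto_alg (ablkcipher/givcipher) flavour of the init hook */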
4402 static int caam_cra_init(struct crypto_tfm *tfm)
4403 {
4404 struct crypto_alg *alg = tfm->__crt_alg;
4405 struct caam_crypto_alg *caam_alg =
4406 container_of(alg, struct caam_crypto_alg, crypto_alg);
4407 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4408
4409 return caam_init_common(ctx, &caam_alg->caam);
4410 }
4411
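/* aead_alg flavour of the init hook */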
4412 static int caam_aead_init(struct crypto_aead *tfm)
4413 {
4414 struct aead_alg *alg = crypto_aead_alg(tfm);
4415 struct caam_aead_alg *caam_alg =
4416 container_of(alg, struct caam_aead_alg, aead);
4417 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4418
4419 return caam_init_common(ctx, &caam_alg->caam);
4420 }
4421
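/*
 * Common teardown: unmap whichever shared descriptors and split key
 * were DMA-mapped for this context, then release the job ring taken
 * in caam_init_common().
 */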
4422 static void caam_exit_common(struct caam_ctx *ctx)
4423 {
4424 if (ctx->sh_desc_enc_dma &&
4425 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4426 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4427 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4428 if (ctx->sh_desc_dec_dma &&
4429 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4430 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4431 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4432 if (ctx->sh_desc_givenc_dma &&
4433 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4434 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4435 desc_bytes(ctx->sh_desc_givenc),
4436 DMA_TO_DEVICE);
4437 if (ctx->key_dma &&
4438 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4439 dma_unmap_single(ctx->jrdev, ctx->key_dma,
4440 ctx->enckeylen + ctx->split_key_pad_len,
4441 DMA_TO_DEVICE);
4442
4443 caam_jr_free(ctx->jrdev);
4444 }
4445
4446 static void caam_cra_exit(struct crypto_tfm *tfm)
4447 {
4448 caam_exit_common(crypto_tfm_ctx(tfm));
4449 }
4450
4451 static void caam_aead_exit(struct crypto_aead *tfm)
4452 {
4453 caam_exit_common(crypto_aead_ctx(tfm));
4454 }
4455
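/*
 * Module exit: unregister every AEAD entry that was successfully
 * registered, then drain alg_list, unregistering and freeing the
 * dynamically allocated ablkcipher algorithms.
 */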
4456 static void __exit caam_algapi_exit(void)
4457 {
4459 struct caam_crypto_alg *t_alg, *n;
4460 int i;
4461
4462 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4463 struct caam_aead_alg *t_alg = driver_aeads + i;
4464
4465 if (t_alg->registered)
4466 crypto_unregister_aead(&t_alg->aead);
4467 }
4468
4469 if (!alg_list.next)
4470 return;
4471
4472 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4473 crypto_unregister_alg(&t_alg->crypto_alg);
4474 list_del(&t_alg->entry);
4475 kfree(t_alg);
4476 }
4477 }
4478
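/*
 * Build a caam_crypto_alg from a driver template: common crypto_alg
 * fields are filled here, and the type-specific ops are selected from
 * template->type (givcipher vs. plain ablkcipher).
 */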
4479 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4480 *template)
4481 {
4482 struct caam_crypto_alg *t_alg;
4483 struct crypto_alg *alg;
4484
4485 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4486 if (!t_alg) {
4487 pr_err("failed to allocate t_alg\n");
4488 return ERR_PTR(-ENOMEM);
4489 }
4490
4491 alg = &t_alg->crypto_alg;
4492
4493 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4494 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4495 template->driver_name);
4496 alg->cra_module = THIS_MODULE;
4497 alg->cra_init = caam_cra_init;
4498 alg->cra_exit = caam_cra_exit;
4499 alg->cra_priority = CAAM_CRA_PRIORITY;
4500 alg->cra_blocksize = template->blocksize;
4501 alg->cra_alignmask = 0;
4502 alg->cra_ctxsize = sizeof(struct caam_ctx);
4503 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4504 template->type;
4505 switch (template->type) {
4506 case CRYPTO_ALG_TYPE_GIVCIPHER:
4507 alg->cra_type = &crypto_givcipher_type;
4508 alg->cra_ablkcipher = template->template_ablkcipher;
4509 break;
4510 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4511 alg->cra_type = &crypto_ablkcipher_type;
4512 alg->cra_ablkcipher = template->template_ablkcipher;
4513 break;
4514 }
4515
4516 t_alg->caam.class1_alg_type = template->class1_alg_type;
4517 t_alg->caam.class2_alg_type = template->class2_alg_type;
4518 t_alg->caam.alg_op = template->alg_op;
4519
4520 return t_alg;
4521 }
4522
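/*
 * AEAD entries come from the static driver_aeads table, so only the
 * fields common to every entry are filled in here.
 */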
4523 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4524 {
4525 struct aead_alg *alg = &t_alg->aead;
4526
4527 alg->base.cra_module = THIS_MODULE;
4528 alg->base.cra_priority = CAAM_CRA_PRIORITY;
4529 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4530 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4531
4532 alg->init = caam_aead_init;
4533 alg->exit = caam_aead_exit;
4534 }
4535
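/*
 * Module init: locate the CAAM controller, read the CHA version and
 * instantiation registers, and register only those algorithms the
 * detected hardware can actually execute.
 */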
4536 static int __init caam_algapi_init(void)
4537 {
4538 struct device_node *dev_node;
4539 struct platform_device *pdev;
4540 struct device *ctrldev;
4541 struct caam_drv_private *priv;
4542 int i = 0, err = 0;
4543 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4544 unsigned int md_limit = SHA512_DIGEST_SIZE;
4545 bool registered = false;
4546
4547 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4548 if (!dev_node) {
4549 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4550 if (!dev_node)
4551 return -ENODEV;
4552 }
4553
4554 pdev = of_find_device_by_node(dev_node);
4555 if (!pdev) {
4556 of_node_put(dev_node);
4557 return -ENODEV;
4558 }
4559
4560 ctrldev = &pdev->dev;
4561 priv = dev_get_drvdata(ctrldev);
4562 of_node_put(dev_node);
4563
4564 /*
4565 * If priv is NULL, it's probably because the caam driver wasn't
4566 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4567 */
4568 if (!priv)
4569 return -ENODEV;
4570
4572 INIT_LIST_HEAD(&alg_list);
4573
4574 /*
4575 * Register crypto algorithms the device supports.
4576 * First, detect presence and attributes of DES, AES, and MD blocks.
4577 */
4578 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4579 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4580 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4581 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4582 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4583
4584 /* If MD is present, limit digest size based on LP256 */
4585 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4586 md_limit = SHA256_DIGEST_SIZE;
4587
4588 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4589 struct caam_crypto_alg *t_alg;
4590 struct caam_alg_template *alg = driver_algs + i;
4591 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4592
4593 /* Skip DES algorithms if not supported by device */
4594 if (!des_inst &&
4595 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4596 (alg_sel == OP_ALG_ALGSEL_DES)))
4597 continue;
4598
4599 /* Skip AES algorithms if not supported by device */
4600 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4601 continue;
4602
4603 /*
4604 * Check support for AES modes not available
4605 * on LP devices.
4606 */
4607 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4608 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
4609 OP_ALG_AAI_XTS)
4610 continue;
4611
4612 t_alg = caam_alg_alloc(alg);
4613 if (IS_ERR(t_alg)) {
4614 err = PTR_ERR(t_alg);
4615 pr_warn("%s alg allocation failed\n", alg->driver_name);
4616 continue;
4617 }
4618
4619 err = crypto_register_alg(&t_alg->crypto_alg);
4620 if (err) {
4621 pr_warn("%s alg registration failed\n",
4622 t_alg->crypto_alg.cra_driver_name);
4623 kfree(t_alg);
4624 continue;
4625 }
4626
4627 list_add_tail(&t_alg->entry, &alg_list);
4628 registered = true;
4629 }
4630
4631 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4632 struct caam_aead_alg *t_alg = driver_aeads + i;
4633 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4634 OP_ALG_ALGSEL_MASK;
4635 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4636 OP_ALG_ALGSEL_MASK;
4637 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4638
4639 /* Skip DES algorithms if not supported by device */
4640 if (!des_inst &&
4641 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4642 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4643 continue;
4644
4645 /* Skip AES algorithms if not supported by device */
4646 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4647 continue;
4648
4649 /*
4650 * Check support for AES algorithms not available
4651 * on LP devices.
4652 */
4653 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4654 if (alg_aai == OP_ALG_AAI_GCM)
4655 continue;
4656
4657 /*
4658 * Skip algorithms requiring message digests
4659 * if MD or MD size is not supported by device.
4660 */
4661 if (c2_alg_sel &&
4662 (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4663 continue;
4664
4665 caam_aead_alg_init(t_alg);
4666
4667 err = crypto_register_aead(&t_alg->aead);
4668 if (err) {
4669 pr_warn("%s alg registration failed\n",
4670 t_alg->aead.base.cra_driver_name);
4671 continue;
4672 }
4673
4674 t_alg->registered = true;
4675 registered = true;
4676 }
4677
4678 if (registered)
4679 pr_info("caam algorithms registered in /proc/crypto\n");
4680
4681 return err;
4682 }
4683
4684 module_init(caam_algapi_init);
4685 module_exit(caam_algapi_exit);
4686
4687 MODULE_LICENSE("GPL");
4688 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4689 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4690