/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
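
/*
 * For orientation, this is roughly how such a job descriptor is built at
 * runtime by this driver (see init_aead_job() below); the header, the
 * ShareDesc pointer and the SEQ pointers map 1:1 onto the layout above:
 *
 *	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 */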

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
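
/*
 * Note: CAAM_CMD_SZ (from desc_constr.h) is the size of one 32-bit
 * descriptor command word, so DESC_MAX_USED_LEN expresses the same limit
 * as DESC_MAX_USED_BYTES, only in command words instead of bytes.
 */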

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
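
/*
 * Layout of key[] for the authenc algorithms handled below: the (padded)
 * split authentication key generated by gen_split_key() comes first,
 * immediately followed by the encryption key (see aead_setkey()); for
 * RFC3686 the 4-byte nonce travels at the very tail of the key material.
 */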

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
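
	/*
	 * Note on the pointer arithmetic above: for RFC3686 the nonce is
	 * transported as the last CTR_RFC3686_NONCE_SIZE bytes of the
	 * encryption key, so it sits at the tail of the key material that
	 * aead_setkey() copied into ctx->key.
	 */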

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

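	/*
	 * desc_inline_query() reports one bit per key in inl_mask: bit 0
	 * is set when the (split) authentication key fits inline in the
	 * shared descriptor, bit 1 when the cipher key does; a key that
	 * does not fit is referenced through its DMA address instead.
	 */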
	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key right after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
	ctx->cdata.keylen = keys.enckeylen;
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   DMA_TO_DEVICE);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   DMA_TO_DEVICE);
	return rfc4543_set_sh_desc(aead);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
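
	/*
	 * cdata.keylen now counts only the CTR key proper; the nonce stays
	 * behind it in ctx->key (the {KEY, NONCE} layout above), where the
	 * shared-descriptor constructors can pick it up.
	 */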

	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

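	/*
	 * XTS takes two concatenated AES keys of equal size (data key and
	 * tweak key), hence the valid lengths are exactly twice the AES
	 * minimum and maximum key sizes.
	 */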
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @iv_dir: DMA mapping direction for IV
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	enum dma_data_direction iv_dir;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
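
/*
 * Note on memory layout: the edesc allocations below place the h/w job
 * descriptor, the sec4 link table and (for ablkcipher) the IV back to back
 * in one buffer - |struct ..._edesc|hw_desc|sec4_sg|iv| - which is why the
 * IV is later recovered as hw_desc + desc_bytes + sec4_sg_bytes.
 */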

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->iv_dir,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify that the hw auth check passed; otherwise report -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	/* In case initial IV was generated, copy it in GIVCIPHER request */
	if (edesc->iv_dir == DMA_FROM_DEVICE) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
		     edesc->sec4_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

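	/* Default to in-place operation; overridden below when dst != src */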
	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0;
	dma_addr_t dst_dma;
	int len;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	pr_err("asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

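	/*
	 * The input always goes through the link table here: its first
	 * entry is the IV, followed by the source data, hence the
	 * req->nbytes + ivsize input length.
	 */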
	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
			  LDST_SGF);

	if (likely(req->src == req->dst)) {
		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (edesc->src_nents == 1) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
		  sizeof(struct sec4_sg_entry);
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
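
	/*
	 * A link table region is only needed for a side with more than one
	 * mapped segment; a single-segment side is pointed to directly by
	 * the SEQ pointer in the job descriptor.
	 */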

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
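
	/*
	 * Link table layout built below: entry 0 holds the IV, entries
	 * 1..mapped_src_nents describe the source, and a scattered
	 * destination (if any) starts at dst_sg_idx.
	 */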

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_TO_DEVICE;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += 1 + mapped_dst_nents;
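
	/*
	 * Unlike the plain ablkcipher path, here the generated IV belongs
	 * to the output: the table region at dst_sg_idx starts with the IV
	 * entry, followed by the destination segments.
	 */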
1705
1706 /*
1707 * allocate space for base edesc and hw desc commands, link tables, IV
1708 */
1709 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1710 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1711 GFP_DMA | flags);
1712 if (!edesc) {
1713 dev_err(jrdev, "could not allocate extended descriptor\n");
1714 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1715 0, DMA_NONE, 0, 0);
1716 return ERR_PTR(-ENOMEM);
1717 }
1718
1719 edesc->src_nents = src_nents;
1720 edesc->dst_nents = dst_nents;
1721 edesc->sec4_sg_bytes = sec4_sg_bytes;
1722 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1723 desc_bytes);
1724 edesc->iv_dir = DMA_FROM_DEVICE;
1725
1726 /* Make sure IV is located in a DMAable area */
1727 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1728 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
1729 if (dma_mapping_error(jrdev, iv_dma)) {
1730 dev_err(jrdev, "unable to map IV\n");
1731 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1732 0, DMA_NONE, 0, 0);
1733 kfree(edesc);
1734 return ERR_PTR(-ENOMEM);
1735 }
1736
1737 if (mapped_src_nents > 1)
1738 sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
1739 0);
1740
1741 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
1742 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
1743 dst_sg_idx + 1, 0);
1744
1745 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1746 sec4_sg_bytes, DMA_TO_DEVICE);
1747 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1748 dev_err(jrdev, "unable to map S/G table\n");
1749 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1750 iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
1751 kfree(edesc);
1752 return ERR_PTR(-ENOMEM);
1753 }
1754 edesc->iv_dma = iv_dma;
1755
1756 #ifdef DEBUG
1757 print_hex_dump(KERN_ERR,
1758 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
1759 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1760 sec4_sg_bytes, 1);
1761 #endif
1762
1763 return edesc;
1764 }
1765
ablkcipher_givencrypt(struct skcipher_givcrypt_request * creq)1766 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1767 {
1768 struct ablkcipher_request *req = &creq->creq;
1769 struct ablkcipher_edesc *edesc;
1770 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1771 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1772 struct device *jrdev = ctx->jrdev;
1773 u32 *desc;
1774 int ret = 0;
1775
1776 /* allocate extended descriptor */
1777 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1778 if (IS_ERR(edesc))
1779 return PTR_ERR(edesc);
1780
1781 	/* Create and submit job descriptor */
1782 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
1783 edesc, req);
1784 #ifdef DEBUG
1785 print_hex_dump(KERN_ERR,
1786 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
1787 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1788 desc_bytes(edesc->hw_desc), 1);
1789 #endif
1790 desc = edesc->hw_desc;
1791 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1792
1793 if (!ret) {
1794 ret = -EINPROGRESS;
1795 } else {
1796 ablkcipher_unmap(jrdev, edesc, req);
1797 kfree(edesc);
1798 }
1799
1800 return ret;
1801 }
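/*
 * Illustrative sketch, not part of this driver: callers reach the
 * givencrypt entry point above through the legacy givcrypt interface.
 * Assuming a transform bound to one of the GIVCIPHER templates below
 * (helper names vary across kernel versions, error handling omitted):
 *
 *	struct skcipher_givcrypt_request *greq;
 *
 *	ablkcipher_request_set_tfm(&greq->creq, tfm);
 *	ablkcipher_request_set_crypt(&greq->creq, src, dst, nbytes, NULL);
 *	greq->giv = iv_out;	(buffer receiving the generated IV)
 *	greq->seq = seq;	(sequence number seeding the IV)
 *
 * The request is then submitted through the crypto core, which invokes
 * the ->givencrypt() callback registered in the template.
 */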
1802
1803 #define template_aead template_u.aead
1804 #define template_ablkcipher template_u.ablkcipher
1805 struct caam_alg_template {
1806 char name[CRYPTO_MAX_ALG_NAME];
1807 char driver_name[CRYPTO_MAX_ALG_NAME];
1808 unsigned int blocksize;
1809 u32 type;
1810 union {
1811 struct ablkcipher_alg ablkcipher;
1812 } template_u;
1813 u32 class1_alg_type;
1814 u32 class2_alg_type;
1815 };
1816
1817 static struct caam_alg_template driver_algs[] = {
1818 /* ablkcipher descriptor */
1819 {
1820 .name = "cbc(aes)",
1821 .driver_name = "cbc-aes-caam",
1822 .blocksize = AES_BLOCK_SIZE,
1823 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1824 .template_ablkcipher = {
1825 .setkey = ablkcipher_setkey,
1826 .encrypt = ablkcipher_encrypt,
1827 .decrypt = ablkcipher_decrypt,
1828 .givencrypt = ablkcipher_givencrypt,
1829 .geniv = "<built-in>",
1830 .min_keysize = AES_MIN_KEY_SIZE,
1831 .max_keysize = AES_MAX_KEY_SIZE,
1832 .ivsize = AES_BLOCK_SIZE,
1833 },
1834 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1835 },
1836 {
1837 .name = "cbc(des3_ede)",
1838 .driver_name = "cbc-3des-caam",
1839 .blocksize = DES3_EDE_BLOCK_SIZE,
1840 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1841 .template_ablkcipher = {
1842 .setkey = ablkcipher_setkey,
1843 .encrypt = ablkcipher_encrypt,
1844 .decrypt = ablkcipher_decrypt,
1845 .givencrypt = ablkcipher_givencrypt,
1846 .geniv = "<built-in>",
1847 .min_keysize = DES3_EDE_KEY_SIZE,
1848 .max_keysize = DES3_EDE_KEY_SIZE,
1849 .ivsize = DES3_EDE_BLOCK_SIZE,
1850 },
1851 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1852 },
1853 {
1854 .name = "cbc(des)",
1855 .driver_name = "cbc-des-caam",
1856 .blocksize = DES_BLOCK_SIZE,
1857 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1858 .template_ablkcipher = {
1859 .setkey = ablkcipher_setkey,
1860 .encrypt = ablkcipher_encrypt,
1861 .decrypt = ablkcipher_decrypt,
1862 .givencrypt = ablkcipher_givencrypt,
1863 .geniv = "<built-in>",
1864 .min_keysize = DES_KEY_SIZE,
1865 .max_keysize = DES_KEY_SIZE,
1866 .ivsize = DES_BLOCK_SIZE,
1867 },
1868 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1869 },
1870 {
1871 .name = "ctr(aes)",
1872 .driver_name = "ctr-aes-caam",
1873 .blocksize = 1,
1874 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1875 .template_ablkcipher = {
1876 .setkey = ablkcipher_setkey,
1877 .encrypt = ablkcipher_encrypt,
1878 .decrypt = ablkcipher_decrypt,
1879 .geniv = "chainiv",
1880 .min_keysize = AES_MIN_KEY_SIZE,
1881 .max_keysize = AES_MAX_KEY_SIZE,
1882 .ivsize = AES_BLOCK_SIZE,
1883 },
1884 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1885 },
1886 {
1887 .name = "rfc3686(ctr(aes))",
1888 .driver_name = "rfc3686-ctr-aes-caam",
1889 .blocksize = 1,
1890 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1891 .template_ablkcipher = {
1892 .setkey = ablkcipher_setkey,
1893 .encrypt = ablkcipher_encrypt,
1894 .decrypt = ablkcipher_decrypt,
1895 .givencrypt = ablkcipher_givencrypt,
1896 .geniv = "<built-in>",
1897 .min_keysize = AES_MIN_KEY_SIZE +
1898 CTR_RFC3686_NONCE_SIZE,
1899 .max_keysize = AES_MAX_KEY_SIZE +
1900 CTR_RFC3686_NONCE_SIZE,
1901 .ivsize = CTR_RFC3686_IV_SIZE,
1902 },
1903 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1904 },
1905 {
1906 .name = "xts(aes)",
1907 .driver_name = "xts-aes-caam",
1908 .blocksize = AES_BLOCK_SIZE,
1909 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1910 .template_ablkcipher = {
1911 .setkey = xts_ablkcipher_setkey,
1912 .encrypt = ablkcipher_encrypt,
1913 .decrypt = ablkcipher_decrypt,
1914 .geniv = "eseqiv",
1915 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1916 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1917 .ivsize = AES_BLOCK_SIZE,
1918 },
1919 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1920 },
1921 };
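/*
 * Illustrative sketch, not part of this driver: once a template above is
 * registered, kernel users request it by name through the generic API,
 * e.g. for the "cbc(aes)" entry (done_cb/priv are placeholders, error
 * handling omitted):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					done_cb, priv);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * The call returns -EINPROGRESS; completion is signalled through done_cb.
 * The crypto core prefers this driver over software implementations
 * because of its higher CAAM_CRA_PRIORITY.
 */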
1922
1923 static struct caam_aead_alg driver_aeads[] = {
1924 {
1925 .aead = {
1926 .base = {
1927 .cra_name = "rfc4106(gcm(aes))",
1928 .cra_driver_name = "rfc4106-gcm-aes-caam",
1929 .cra_blocksize = 1,
1930 },
1931 .setkey = rfc4106_setkey,
1932 .setauthsize = rfc4106_setauthsize,
1933 .encrypt = ipsec_gcm_encrypt,
1934 .decrypt = ipsec_gcm_decrypt,
1935 .ivsize = 8,
1936 .maxauthsize = AES_BLOCK_SIZE,
1937 },
1938 .caam = {
1939 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1940 },
1941 },
1942 {
1943 .aead = {
1944 .base = {
1945 .cra_name = "rfc4543(gcm(aes))",
1946 .cra_driver_name = "rfc4543-gcm-aes-caam",
1947 .cra_blocksize = 1,
1948 },
1949 .setkey = rfc4543_setkey,
1950 .setauthsize = rfc4543_setauthsize,
1951 .encrypt = ipsec_gcm_encrypt,
1952 .decrypt = ipsec_gcm_decrypt,
1953 .ivsize = 8,
1954 .maxauthsize = AES_BLOCK_SIZE,
1955 },
1956 .caam = {
1957 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1958 },
1959 },
1960 /* Galois Counter Mode */
1961 {
1962 .aead = {
1963 .base = {
1964 .cra_name = "gcm(aes)",
1965 .cra_driver_name = "gcm-aes-caam",
1966 .cra_blocksize = 1,
1967 },
1968 .setkey = gcm_setkey,
1969 .setauthsize = gcm_setauthsize,
1970 .encrypt = gcm_encrypt,
1971 .decrypt = gcm_decrypt,
1972 .ivsize = 12,
1973 .maxauthsize = AES_BLOCK_SIZE,
1974 },
1975 .caam = {
1976 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1977 },
1978 },
1979 /* single-pass ipsec_esp descriptor */
1980 {
1981 .aead = {
1982 .base = {
1983 .cra_name = "authenc(hmac(md5),"
1984 "ecb(cipher_null))",
1985 .cra_driver_name = "authenc-hmac-md5-"
1986 "ecb-cipher_null-caam",
1987 .cra_blocksize = NULL_BLOCK_SIZE,
1988 },
1989 .setkey = aead_setkey,
1990 .setauthsize = aead_setauthsize,
1991 .encrypt = aead_encrypt,
1992 .decrypt = aead_decrypt,
1993 .ivsize = NULL_IV_SIZE,
1994 .maxauthsize = MD5_DIGEST_SIZE,
1995 },
1996 .caam = {
1997 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1998 OP_ALG_AAI_HMAC_PRECOMP,
1999 },
2000 },
2001 {
2002 .aead = {
2003 .base = {
2004 .cra_name = "authenc(hmac(sha1),"
2005 "ecb(cipher_null))",
2006 .cra_driver_name = "authenc-hmac-sha1-"
2007 "ecb-cipher_null-caam",
2008 .cra_blocksize = NULL_BLOCK_SIZE,
2009 },
2010 .setkey = aead_setkey,
2011 .setauthsize = aead_setauthsize,
2012 .encrypt = aead_encrypt,
2013 .decrypt = aead_decrypt,
2014 .ivsize = NULL_IV_SIZE,
2015 .maxauthsize = SHA1_DIGEST_SIZE,
2016 },
2017 .caam = {
2018 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2019 OP_ALG_AAI_HMAC_PRECOMP,
2020 },
2021 },
2022 {
2023 .aead = {
2024 .base = {
2025 .cra_name = "authenc(hmac(sha224),"
2026 "ecb(cipher_null))",
2027 .cra_driver_name = "authenc-hmac-sha224-"
2028 "ecb-cipher_null-caam",
2029 .cra_blocksize = NULL_BLOCK_SIZE,
2030 },
2031 .setkey = aead_setkey,
2032 .setauthsize = aead_setauthsize,
2033 .encrypt = aead_encrypt,
2034 .decrypt = aead_decrypt,
2035 .ivsize = NULL_IV_SIZE,
2036 .maxauthsize = SHA224_DIGEST_SIZE,
2037 },
2038 .caam = {
2039 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2040 OP_ALG_AAI_HMAC_PRECOMP,
2041 },
2042 },
2043 {
2044 .aead = {
2045 .base = {
2046 .cra_name = "authenc(hmac(sha256),"
2047 "ecb(cipher_null))",
2048 .cra_driver_name = "authenc-hmac-sha256-"
2049 "ecb-cipher_null-caam",
2050 .cra_blocksize = NULL_BLOCK_SIZE,
2051 },
2052 .setkey = aead_setkey,
2053 .setauthsize = aead_setauthsize,
2054 .encrypt = aead_encrypt,
2055 .decrypt = aead_decrypt,
2056 .ivsize = NULL_IV_SIZE,
2057 .maxauthsize = SHA256_DIGEST_SIZE,
2058 },
2059 .caam = {
2060 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2061 OP_ALG_AAI_HMAC_PRECOMP,
2062 },
2063 },
2064 {
2065 .aead = {
2066 .base = {
2067 .cra_name = "authenc(hmac(sha384),"
2068 "ecb(cipher_null))",
2069 .cra_driver_name = "authenc-hmac-sha384-"
2070 "ecb-cipher_null-caam",
2071 .cra_blocksize = NULL_BLOCK_SIZE,
2072 },
2073 .setkey = aead_setkey,
2074 .setauthsize = aead_setauthsize,
2075 .encrypt = aead_encrypt,
2076 .decrypt = aead_decrypt,
2077 .ivsize = NULL_IV_SIZE,
2078 .maxauthsize = SHA384_DIGEST_SIZE,
2079 },
2080 .caam = {
2081 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2082 OP_ALG_AAI_HMAC_PRECOMP,
2083 },
2084 },
2085 {
2086 .aead = {
2087 .base = {
2088 .cra_name = "authenc(hmac(sha512),"
2089 "ecb(cipher_null))",
2090 .cra_driver_name = "authenc-hmac-sha512-"
2091 "ecb-cipher_null-caam",
2092 .cra_blocksize = NULL_BLOCK_SIZE,
2093 },
2094 .setkey = aead_setkey,
2095 .setauthsize = aead_setauthsize,
2096 .encrypt = aead_encrypt,
2097 .decrypt = aead_decrypt,
2098 .ivsize = NULL_IV_SIZE,
2099 .maxauthsize = SHA512_DIGEST_SIZE,
2100 },
2101 .caam = {
2102 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2103 OP_ALG_AAI_HMAC_PRECOMP,
2104 },
2105 },
2106 {
2107 .aead = {
2108 .base = {
2109 .cra_name = "authenc(hmac(md5),cbc(aes))",
2110 .cra_driver_name = "authenc-hmac-md5-"
2111 "cbc-aes-caam",
2112 .cra_blocksize = AES_BLOCK_SIZE,
2113 },
2114 .setkey = aead_setkey,
2115 .setauthsize = aead_setauthsize,
2116 .encrypt = aead_encrypt,
2117 .decrypt = aead_decrypt,
2118 .ivsize = AES_BLOCK_SIZE,
2119 .maxauthsize = MD5_DIGEST_SIZE,
2120 },
2121 .caam = {
2122 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2123 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2124 OP_ALG_AAI_HMAC_PRECOMP,
2125 },
2126 },
2127 {
2128 .aead = {
2129 .base = {
2130 .cra_name = "echainiv(authenc(hmac(md5),"
2131 "cbc(aes)))",
2132 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2133 "cbc-aes-caam",
2134 .cra_blocksize = AES_BLOCK_SIZE,
2135 },
2136 .setkey = aead_setkey,
2137 .setauthsize = aead_setauthsize,
2138 .encrypt = aead_encrypt,
2139 .decrypt = aead_decrypt,
2140 .ivsize = AES_BLOCK_SIZE,
2141 .maxauthsize = MD5_DIGEST_SIZE,
2142 },
2143 .caam = {
2144 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2145 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2146 OP_ALG_AAI_HMAC_PRECOMP,
2147 .geniv = true,
2148 },
2149 },
2150 {
2151 .aead = {
2152 .base = {
2153 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2154 .cra_driver_name = "authenc-hmac-sha1-"
2155 "cbc-aes-caam",
2156 .cra_blocksize = AES_BLOCK_SIZE,
2157 },
2158 .setkey = aead_setkey,
2159 .setauthsize = aead_setauthsize,
2160 .encrypt = aead_encrypt,
2161 .decrypt = aead_decrypt,
2162 .ivsize = AES_BLOCK_SIZE,
2163 .maxauthsize = SHA1_DIGEST_SIZE,
2164 },
2165 .caam = {
2166 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2167 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2168 OP_ALG_AAI_HMAC_PRECOMP,
2169 },
2170 },
2171 {
2172 .aead = {
2173 .base = {
2174 .cra_name = "echainiv(authenc(hmac(sha1),"
2175 "cbc(aes)))",
2176 .cra_driver_name = "echainiv-authenc-"
2177 "hmac-sha1-cbc-aes-caam",
2178 .cra_blocksize = AES_BLOCK_SIZE,
2179 },
2180 .setkey = aead_setkey,
2181 .setauthsize = aead_setauthsize,
2182 .encrypt = aead_encrypt,
2183 .decrypt = aead_decrypt,
2184 .ivsize = AES_BLOCK_SIZE,
2185 .maxauthsize = SHA1_DIGEST_SIZE,
2186 },
2187 .caam = {
2188 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2189 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2190 OP_ALG_AAI_HMAC_PRECOMP,
2191 .geniv = true,
2192 },
2193 },
2194 {
2195 .aead = {
2196 .base = {
2197 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2198 .cra_driver_name = "authenc-hmac-sha224-"
2199 "cbc-aes-caam",
2200 .cra_blocksize = AES_BLOCK_SIZE,
2201 },
2202 .setkey = aead_setkey,
2203 .setauthsize = aead_setauthsize,
2204 .encrypt = aead_encrypt,
2205 .decrypt = aead_decrypt,
2206 .ivsize = AES_BLOCK_SIZE,
2207 .maxauthsize = SHA224_DIGEST_SIZE,
2208 },
2209 .caam = {
2210 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2211 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2212 OP_ALG_AAI_HMAC_PRECOMP,
2213 },
2214 },
2215 {
2216 .aead = {
2217 .base = {
2218 .cra_name = "echainiv(authenc(hmac(sha224),"
2219 "cbc(aes)))",
2220 .cra_driver_name = "echainiv-authenc-"
2221 "hmac-sha224-cbc-aes-caam",
2222 .cra_blocksize = AES_BLOCK_SIZE,
2223 },
2224 .setkey = aead_setkey,
2225 .setauthsize = aead_setauthsize,
2226 .encrypt = aead_encrypt,
2227 .decrypt = aead_decrypt,
2228 .ivsize = AES_BLOCK_SIZE,
2229 .maxauthsize = SHA224_DIGEST_SIZE,
2230 },
2231 .caam = {
2232 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2233 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2234 OP_ALG_AAI_HMAC_PRECOMP,
2235 .geniv = true,
2236 },
2237 },
2238 {
2239 .aead = {
2240 .base = {
2241 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2242 .cra_driver_name = "authenc-hmac-sha256-"
2243 "cbc-aes-caam",
2244 .cra_blocksize = AES_BLOCK_SIZE,
2245 },
2246 .setkey = aead_setkey,
2247 .setauthsize = aead_setauthsize,
2248 .encrypt = aead_encrypt,
2249 .decrypt = aead_decrypt,
2250 .ivsize = AES_BLOCK_SIZE,
2251 .maxauthsize = SHA256_DIGEST_SIZE,
2252 },
2253 .caam = {
2254 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2255 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2256 OP_ALG_AAI_HMAC_PRECOMP,
2257 },
2258 },
2259 {
2260 .aead = {
2261 .base = {
2262 .cra_name = "echainiv(authenc(hmac(sha256),"
2263 "cbc(aes)))",
2264 .cra_driver_name = "echainiv-authenc-"
2265 "hmac-sha256-cbc-aes-caam",
2266 .cra_blocksize = AES_BLOCK_SIZE,
2267 },
2268 .setkey = aead_setkey,
2269 .setauthsize = aead_setauthsize,
2270 .encrypt = aead_encrypt,
2271 .decrypt = aead_decrypt,
2272 .ivsize = AES_BLOCK_SIZE,
2273 .maxauthsize = SHA256_DIGEST_SIZE,
2274 },
2275 .caam = {
2276 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2277 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2278 OP_ALG_AAI_HMAC_PRECOMP,
2279 .geniv = true,
2280 },
2281 },
2282 {
2283 .aead = {
2284 .base = {
2285 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2286 .cra_driver_name = "authenc-hmac-sha384-"
2287 "cbc-aes-caam",
2288 .cra_blocksize = AES_BLOCK_SIZE,
2289 },
2290 .setkey = aead_setkey,
2291 .setauthsize = aead_setauthsize,
2292 .encrypt = aead_encrypt,
2293 .decrypt = aead_decrypt,
2294 .ivsize = AES_BLOCK_SIZE,
2295 .maxauthsize = SHA384_DIGEST_SIZE,
2296 },
2297 .caam = {
2298 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2299 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2300 OP_ALG_AAI_HMAC_PRECOMP,
2301 },
2302 },
2303 {
2304 .aead = {
2305 .base = {
2306 .cra_name = "echainiv(authenc(hmac(sha384),"
2307 "cbc(aes)))",
2308 .cra_driver_name = "echainiv-authenc-"
2309 "hmac-sha384-cbc-aes-caam",
2310 .cra_blocksize = AES_BLOCK_SIZE,
2311 },
2312 .setkey = aead_setkey,
2313 .setauthsize = aead_setauthsize,
2314 .encrypt = aead_encrypt,
2315 .decrypt = aead_decrypt,
2316 .ivsize = AES_BLOCK_SIZE,
2317 .maxauthsize = SHA384_DIGEST_SIZE,
2318 },
2319 .caam = {
2320 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2321 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2322 OP_ALG_AAI_HMAC_PRECOMP,
2323 .geniv = true,
2324 },
2325 },
2326 {
2327 .aead = {
2328 .base = {
2329 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2330 .cra_driver_name = "authenc-hmac-sha512-"
2331 "cbc-aes-caam",
2332 .cra_blocksize = AES_BLOCK_SIZE,
2333 },
2334 .setkey = aead_setkey,
2335 .setauthsize = aead_setauthsize,
2336 .encrypt = aead_encrypt,
2337 .decrypt = aead_decrypt,
2338 .ivsize = AES_BLOCK_SIZE,
2339 .maxauthsize = SHA512_DIGEST_SIZE,
2340 },
2341 .caam = {
2342 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2343 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2344 OP_ALG_AAI_HMAC_PRECOMP,
2345 },
2346 },
2347 {
2348 .aead = {
2349 .base = {
2350 .cra_name = "echainiv(authenc(hmac(sha512),"
2351 "cbc(aes)))",
2352 .cra_driver_name = "echainiv-authenc-"
2353 "hmac-sha512-cbc-aes-caam",
2354 .cra_blocksize = AES_BLOCK_SIZE,
2355 },
2356 .setkey = aead_setkey,
2357 .setauthsize = aead_setauthsize,
2358 .encrypt = aead_encrypt,
2359 .decrypt = aead_decrypt,
2360 .ivsize = AES_BLOCK_SIZE,
2361 .maxauthsize = SHA512_DIGEST_SIZE,
2362 },
2363 .caam = {
2364 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2365 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2366 OP_ALG_AAI_HMAC_PRECOMP,
2367 .geniv = true,
2368 },
2369 },
2370 {
2371 .aead = {
2372 .base = {
2373 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2374 .cra_driver_name = "authenc-hmac-md5-"
2375 "cbc-des3_ede-caam",
2376 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2377 },
2378 .setkey = aead_setkey,
2379 .setauthsize = aead_setauthsize,
2380 .encrypt = aead_encrypt,
2381 .decrypt = aead_decrypt,
2382 .ivsize = DES3_EDE_BLOCK_SIZE,
2383 .maxauthsize = MD5_DIGEST_SIZE,
2384 },
2385 .caam = {
2386 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2387 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2388 OP_ALG_AAI_HMAC_PRECOMP,
2389 }
2390 },
2391 {
2392 .aead = {
2393 .base = {
2394 .cra_name = "echainiv(authenc(hmac(md5),"
2395 "cbc(des3_ede)))",
2396 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2397 "cbc-des3_ede-caam",
2398 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2399 },
2400 .setkey = aead_setkey,
2401 .setauthsize = aead_setauthsize,
2402 .encrypt = aead_encrypt,
2403 .decrypt = aead_decrypt,
2404 .ivsize = DES3_EDE_BLOCK_SIZE,
2405 .maxauthsize = MD5_DIGEST_SIZE,
2406 },
2407 .caam = {
2408 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2409 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2410 OP_ALG_AAI_HMAC_PRECOMP,
2411 .geniv = true,
2412 }
2413 },
2414 {
2415 .aead = {
2416 .base = {
2417 .cra_name = "authenc(hmac(sha1),"
2418 "cbc(des3_ede))",
2419 .cra_driver_name = "authenc-hmac-sha1-"
2420 "cbc-des3_ede-caam",
2421 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2422 },
2423 .setkey = aead_setkey,
2424 .setauthsize = aead_setauthsize,
2425 .encrypt = aead_encrypt,
2426 .decrypt = aead_decrypt,
2427 .ivsize = DES3_EDE_BLOCK_SIZE,
2428 .maxauthsize = SHA1_DIGEST_SIZE,
2429 },
2430 .caam = {
2431 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2432 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2433 OP_ALG_AAI_HMAC_PRECOMP,
2434 },
2435 },
2436 {
2437 .aead = {
2438 .base = {
2439 .cra_name = "echainiv(authenc(hmac(sha1),"
2440 "cbc(des3_ede)))",
2441 .cra_driver_name = "echainiv-authenc-"
2442 "hmac-sha1-"
2443 "cbc-des3_ede-caam",
2444 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2445 },
2446 .setkey = aead_setkey,
2447 .setauthsize = aead_setauthsize,
2448 .encrypt = aead_encrypt,
2449 .decrypt = aead_decrypt,
2450 .ivsize = DES3_EDE_BLOCK_SIZE,
2451 .maxauthsize = SHA1_DIGEST_SIZE,
2452 },
2453 .caam = {
2454 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2455 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2456 OP_ALG_AAI_HMAC_PRECOMP,
2457 .geniv = true,
2458 },
2459 },
2460 {
2461 .aead = {
2462 .base = {
2463 .cra_name = "authenc(hmac(sha224),"
2464 "cbc(des3_ede))",
2465 .cra_driver_name = "authenc-hmac-sha224-"
2466 "cbc-des3_ede-caam",
2467 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2468 },
2469 .setkey = aead_setkey,
2470 .setauthsize = aead_setauthsize,
2471 .encrypt = aead_encrypt,
2472 .decrypt = aead_decrypt,
2473 .ivsize = DES3_EDE_BLOCK_SIZE,
2474 .maxauthsize = SHA224_DIGEST_SIZE,
2475 },
2476 .caam = {
2477 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2478 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2479 OP_ALG_AAI_HMAC_PRECOMP,
2480 },
2481 },
2482 {
2483 .aead = {
2484 .base = {
2485 .cra_name = "echainiv(authenc(hmac(sha224),"
2486 "cbc(des3_ede)))",
2487 .cra_driver_name = "echainiv-authenc-"
2488 "hmac-sha224-"
2489 "cbc-des3_ede-caam",
2490 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2491 },
2492 .setkey = aead_setkey,
2493 .setauthsize = aead_setauthsize,
2494 .encrypt = aead_encrypt,
2495 .decrypt = aead_decrypt,
2496 .ivsize = DES3_EDE_BLOCK_SIZE,
2497 .maxauthsize = SHA224_DIGEST_SIZE,
2498 },
2499 .caam = {
2500 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2501 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2502 OP_ALG_AAI_HMAC_PRECOMP,
2503 .geniv = true,
2504 },
2505 },
2506 {
2507 .aead = {
2508 .base = {
2509 .cra_name = "authenc(hmac(sha256),"
2510 "cbc(des3_ede))",
2511 .cra_driver_name = "authenc-hmac-sha256-"
2512 "cbc-des3_ede-caam",
2513 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2514 },
2515 .setkey = aead_setkey,
2516 .setauthsize = aead_setauthsize,
2517 .encrypt = aead_encrypt,
2518 .decrypt = aead_decrypt,
2519 .ivsize = DES3_EDE_BLOCK_SIZE,
2520 .maxauthsize = SHA256_DIGEST_SIZE,
2521 },
2522 .caam = {
2523 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2524 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2525 OP_ALG_AAI_HMAC_PRECOMP,
2526 },
2527 },
2528 {
2529 .aead = {
2530 .base = {
2531 .cra_name = "echainiv(authenc(hmac(sha256),"
2532 "cbc(des3_ede)))",
2533 .cra_driver_name = "echainiv-authenc-"
2534 "hmac-sha256-"
2535 "cbc-des3_ede-caam",
2536 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2537 },
2538 .setkey = aead_setkey,
2539 .setauthsize = aead_setauthsize,
2540 .encrypt = aead_encrypt,
2541 .decrypt = aead_decrypt,
2542 .ivsize = DES3_EDE_BLOCK_SIZE,
2543 .maxauthsize = SHA256_DIGEST_SIZE,
2544 },
2545 .caam = {
2546 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2547 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2548 OP_ALG_AAI_HMAC_PRECOMP,
2549 .geniv = true,
2550 },
2551 },
2552 {
2553 .aead = {
2554 .base = {
2555 .cra_name = "authenc(hmac(sha384),"
2556 "cbc(des3_ede))",
2557 .cra_driver_name = "authenc-hmac-sha384-"
2558 "cbc-des3_ede-caam",
2559 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2560 },
2561 .setkey = aead_setkey,
2562 .setauthsize = aead_setauthsize,
2563 .encrypt = aead_encrypt,
2564 .decrypt = aead_decrypt,
2565 .ivsize = DES3_EDE_BLOCK_SIZE,
2566 .maxauthsize = SHA384_DIGEST_SIZE,
2567 },
2568 .caam = {
2569 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2570 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2571 OP_ALG_AAI_HMAC_PRECOMP,
2572 },
2573 },
2574 {
2575 .aead = {
2576 .base = {
2577 .cra_name = "echainiv(authenc(hmac(sha384),"
2578 "cbc(des3_ede)))",
2579 .cra_driver_name = "echainiv-authenc-"
2580 "hmac-sha384-"
2581 "cbc-des3_ede-caam",
2582 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2583 },
2584 .setkey = aead_setkey,
2585 .setauthsize = aead_setauthsize,
2586 .encrypt = aead_encrypt,
2587 .decrypt = aead_decrypt,
2588 .ivsize = DES3_EDE_BLOCK_SIZE,
2589 .maxauthsize = SHA384_DIGEST_SIZE,
2590 },
2591 .caam = {
2592 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2593 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2594 OP_ALG_AAI_HMAC_PRECOMP,
2595 .geniv = true,
2596 },
2597 },
2598 {
2599 .aead = {
2600 .base = {
2601 .cra_name = "authenc(hmac(sha512),"
2602 "cbc(des3_ede))",
2603 .cra_driver_name = "authenc-hmac-sha512-"
2604 "cbc-des3_ede-caam",
2605 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2606 },
2607 .setkey = aead_setkey,
2608 .setauthsize = aead_setauthsize,
2609 .encrypt = aead_encrypt,
2610 .decrypt = aead_decrypt,
2611 .ivsize = DES3_EDE_BLOCK_SIZE,
2612 .maxauthsize = SHA512_DIGEST_SIZE,
2613 },
2614 .caam = {
2615 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2616 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2617 OP_ALG_AAI_HMAC_PRECOMP,
2618 },
2619 },
2620 {
2621 .aead = {
2622 .base = {
2623 .cra_name = "echainiv(authenc(hmac(sha512),"
2624 "cbc(des3_ede)))",
2625 .cra_driver_name = "echainiv-authenc-"
2626 "hmac-sha512-"
2627 "cbc-des3_ede-caam",
2628 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2629 },
2630 .setkey = aead_setkey,
2631 .setauthsize = aead_setauthsize,
2632 .encrypt = aead_encrypt,
2633 .decrypt = aead_decrypt,
2634 .ivsize = DES3_EDE_BLOCK_SIZE,
2635 .maxauthsize = SHA512_DIGEST_SIZE,
2636 },
2637 .caam = {
2638 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2639 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2640 OP_ALG_AAI_HMAC_PRECOMP,
2641 .geniv = true,
2642 },
2643 },
2644 {
2645 .aead = {
2646 .base = {
2647 .cra_name = "authenc(hmac(md5),cbc(des))",
2648 .cra_driver_name = "authenc-hmac-md5-"
2649 "cbc-des-caam",
2650 .cra_blocksize = DES_BLOCK_SIZE,
2651 },
2652 .setkey = aead_setkey,
2653 .setauthsize = aead_setauthsize,
2654 .encrypt = aead_encrypt,
2655 .decrypt = aead_decrypt,
2656 .ivsize = DES_BLOCK_SIZE,
2657 .maxauthsize = MD5_DIGEST_SIZE,
2658 },
2659 .caam = {
2660 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2661 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2662 OP_ALG_AAI_HMAC_PRECOMP,
2663 },
2664 },
2665 {
2666 .aead = {
2667 .base = {
2668 .cra_name = "echainiv(authenc(hmac(md5),"
2669 "cbc(des)))",
2670 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2671 "cbc-des-caam",
2672 .cra_blocksize = DES_BLOCK_SIZE,
2673 },
2674 .setkey = aead_setkey,
2675 .setauthsize = aead_setauthsize,
2676 .encrypt = aead_encrypt,
2677 .decrypt = aead_decrypt,
2678 .ivsize = DES_BLOCK_SIZE,
2679 .maxauthsize = MD5_DIGEST_SIZE,
2680 },
2681 .caam = {
2682 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2683 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2684 OP_ALG_AAI_HMAC_PRECOMP,
2685 .geniv = true,
2686 },
2687 },
2688 {
2689 .aead = {
2690 .base = {
2691 .cra_name = "authenc(hmac(sha1),cbc(des))",
2692 .cra_driver_name = "authenc-hmac-sha1-"
2693 "cbc-des-caam",
2694 .cra_blocksize = DES_BLOCK_SIZE,
2695 },
2696 .setkey = aead_setkey,
2697 .setauthsize = aead_setauthsize,
2698 .encrypt = aead_encrypt,
2699 .decrypt = aead_decrypt,
2700 .ivsize = DES_BLOCK_SIZE,
2701 .maxauthsize = SHA1_DIGEST_SIZE,
2702 },
2703 .caam = {
2704 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2705 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2706 OP_ALG_AAI_HMAC_PRECOMP,
2707 },
2708 },
2709 {
2710 .aead = {
2711 .base = {
2712 .cra_name = "echainiv(authenc(hmac(sha1),"
2713 "cbc(des)))",
2714 .cra_driver_name = "echainiv-authenc-"
2715 "hmac-sha1-cbc-des-caam",
2716 .cra_blocksize = DES_BLOCK_SIZE,
2717 },
2718 .setkey = aead_setkey,
2719 .setauthsize = aead_setauthsize,
2720 .encrypt = aead_encrypt,
2721 .decrypt = aead_decrypt,
2722 .ivsize = DES_BLOCK_SIZE,
2723 .maxauthsize = SHA1_DIGEST_SIZE,
2724 },
2725 .caam = {
2726 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2727 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2728 OP_ALG_AAI_HMAC_PRECOMP,
2729 .geniv = true,
2730 },
2731 },
2732 {
2733 .aead = {
2734 .base = {
2735 .cra_name = "authenc(hmac(sha224),cbc(des))",
2736 .cra_driver_name = "authenc-hmac-sha224-"
2737 "cbc-des-caam",
2738 .cra_blocksize = DES_BLOCK_SIZE,
2739 },
2740 .setkey = aead_setkey,
2741 .setauthsize = aead_setauthsize,
2742 .encrypt = aead_encrypt,
2743 .decrypt = aead_decrypt,
2744 .ivsize = DES_BLOCK_SIZE,
2745 .maxauthsize = SHA224_DIGEST_SIZE,
2746 },
2747 .caam = {
2748 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2749 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2750 OP_ALG_AAI_HMAC_PRECOMP,
2751 },
2752 },
2753 {
2754 .aead = {
2755 .base = {
2756 .cra_name = "echainiv(authenc(hmac(sha224),"
2757 "cbc(des)))",
2758 .cra_driver_name = "echainiv-authenc-"
2759 "hmac-sha224-cbc-des-caam",
2760 .cra_blocksize = DES_BLOCK_SIZE,
2761 },
2762 .setkey = aead_setkey,
2763 .setauthsize = aead_setauthsize,
2764 .encrypt = aead_encrypt,
2765 .decrypt = aead_decrypt,
2766 .ivsize = DES_BLOCK_SIZE,
2767 .maxauthsize = SHA224_DIGEST_SIZE,
2768 },
2769 .caam = {
2770 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2771 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2772 OP_ALG_AAI_HMAC_PRECOMP,
2773 .geniv = true,
2774 },
2775 },
2776 {
2777 .aead = {
2778 .base = {
2779 .cra_name = "authenc(hmac(sha256),cbc(des))",
2780 .cra_driver_name = "authenc-hmac-sha256-"
2781 "cbc-des-caam",
2782 .cra_blocksize = DES_BLOCK_SIZE,
2783 },
2784 .setkey = aead_setkey,
2785 .setauthsize = aead_setauthsize,
2786 .encrypt = aead_encrypt,
2787 .decrypt = aead_decrypt,
2788 .ivsize = DES_BLOCK_SIZE,
2789 .maxauthsize = SHA256_DIGEST_SIZE,
2790 },
2791 .caam = {
2792 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2793 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2794 OP_ALG_AAI_HMAC_PRECOMP,
2795 },
2796 },
2797 {
2798 .aead = {
2799 .base = {
2800 .cra_name = "echainiv(authenc(hmac(sha256),"
2801 "cbc(des)))",
2802 .cra_driver_name = "echainiv-authenc-"
2803 "hmac-sha256-cbc-des-caam",
2804 .cra_blocksize = DES_BLOCK_SIZE,
2805 },
2806 .setkey = aead_setkey,
2807 .setauthsize = aead_setauthsize,
2808 .encrypt = aead_encrypt,
2809 .decrypt = aead_decrypt,
2810 .ivsize = DES_BLOCK_SIZE,
2811 .maxauthsize = SHA256_DIGEST_SIZE,
2812 },
2813 .caam = {
2814 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2815 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2816 OP_ALG_AAI_HMAC_PRECOMP,
2817 .geniv = true,
2818 },
2819 },
2820 {
2821 .aead = {
2822 .base = {
2823 .cra_name = "authenc(hmac(sha384),cbc(des))",
2824 .cra_driver_name = "authenc-hmac-sha384-"
2825 "cbc-des-caam",
2826 .cra_blocksize = DES_BLOCK_SIZE,
2827 },
2828 .setkey = aead_setkey,
2829 .setauthsize = aead_setauthsize,
2830 .encrypt = aead_encrypt,
2831 .decrypt = aead_decrypt,
2832 .ivsize = DES_BLOCK_SIZE,
2833 .maxauthsize = SHA384_DIGEST_SIZE,
2834 },
2835 .caam = {
2836 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2837 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2838 OP_ALG_AAI_HMAC_PRECOMP,
2839 },
2840 },
2841 {
2842 .aead = {
2843 .base = {
2844 .cra_name = "echainiv(authenc(hmac(sha384),"
2845 "cbc(des)))",
2846 .cra_driver_name = "echainiv-authenc-"
2847 "hmac-sha384-cbc-des-caam",
2848 .cra_blocksize = DES_BLOCK_SIZE,
2849 },
2850 .setkey = aead_setkey,
2851 .setauthsize = aead_setauthsize,
2852 .encrypt = aead_encrypt,
2853 .decrypt = aead_decrypt,
2854 .ivsize = DES_BLOCK_SIZE,
2855 .maxauthsize = SHA384_DIGEST_SIZE,
2856 },
2857 .caam = {
2858 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2859 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2860 OP_ALG_AAI_HMAC_PRECOMP,
2861 .geniv = true,
2862 },
2863 },
2864 {
2865 .aead = {
2866 .base = {
2867 .cra_name = "authenc(hmac(sha512),cbc(des))",
2868 .cra_driver_name = "authenc-hmac-sha512-"
2869 "cbc-des-caam",
2870 .cra_blocksize = DES_BLOCK_SIZE,
2871 },
2872 .setkey = aead_setkey,
2873 .setauthsize = aead_setauthsize,
2874 .encrypt = aead_encrypt,
2875 .decrypt = aead_decrypt,
2876 .ivsize = DES_BLOCK_SIZE,
2877 .maxauthsize = SHA512_DIGEST_SIZE,
2878 },
2879 .caam = {
2880 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2881 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2882 OP_ALG_AAI_HMAC_PRECOMP,
2883 },
2884 },
2885 {
2886 .aead = {
2887 .base = {
2888 .cra_name = "echainiv(authenc(hmac(sha512),"
2889 "cbc(des)))",
2890 .cra_driver_name = "echainiv-authenc-"
2891 "hmac-sha512-cbc-des-caam",
2892 .cra_blocksize = DES_BLOCK_SIZE,
2893 },
2894 .setkey = aead_setkey,
2895 .setauthsize = aead_setauthsize,
2896 .encrypt = aead_encrypt,
2897 .decrypt = aead_decrypt,
2898 .ivsize = DES_BLOCK_SIZE,
2899 .maxauthsize = SHA512_DIGEST_SIZE,
2900 },
2901 .caam = {
2902 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2903 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2904 OP_ALG_AAI_HMAC_PRECOMP,
2905 .geniv = true,
2906 },
2907 },
2908 {
2909 .aead = {
2910 .base = {
2911 .cra_name = "authenc(hmac(md5),"
2912 "rfc3686(ctr(aes)))",
2913 .cra_driver_name = "authenc-hmac-md5-"
2914 "rfc3686-ctr-aes-caam",
2915 .cra_blocksize = 1,
2916 },
2917 .setkey = aead_setkey,
2918 .setauthsize = aead_setauthsize,
2919 .encrypt = aead_encrypt,
2920 .decrypt = aead_decrypt,
2921 .ivsize = CTR_RFC3686_IV_SIZE,
2922 .maxauthsize = MD5_DIGEST_SIZE,
2923 },
2924 .caam = {
2925 .class1_alg_type = OP_ALG_ALGSEL_AES |
2926 OP_ALG_AAI_CTR_MOD128,
2927 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2928 OP_ALG_AAI_HMAC_PRECOMP,
2929 .rfc3686 = true,
2930 },
2931 },
2932 {
2933 .aead = {
2934 .base = {
2935 .cra_name = "seqiv(authenc("
2936 "hmac(md5),rfc3686(ctr(aes))))",
2937 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2938 "rfc3686-ctr-aes-caam",
2939 .cra_blocksize = 1,
2940 },
2941 .setkey = aead_setkey,
2942 .setauthsize = aead_setauthsize,
2943 .encrypt = aead_encrypt,
2944 .decrypt = aead_decrypt,
2945 .ivsize = CTR_RFC3686_IV_SIZE,
2946 .maxauthsize = MD5_DIGEST_SIZE,
2947 },
2948 .caam = {
2949 .class1_alg_type = OP_ALG_ALGSEL_AES |
2950 OP_ALG_AAI_CTR_MOD128,
2951 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2952 OP_ALG_AAI_HMAC_PRECOMP,
2953 .rfc3686 = true,
2954 .geniv = true,
2955 },
2956 },
2957 {
2958 .aead = {
2959 .base = {
2960 .cra_name = "authenc(hmac(sha1),"
2961 "rfc3686(ctr(aes)))",
2962 .cra_driver_name = "authenc-hmac-sha1-"
2963 "rfc3686-ctr-aes-caam",
2964 .cra_blocksize = 1,
2965 },
2966 .setkey = aead_setkey,
2967 .setauthsize = aead_setauthsize,
2968 .encrypt = aead_encrypt,
2969 .decrypt = aead_decrypt,
2970 .ivsize = CTR_RFC3686_IV_SIZE,
2971 .maxauthsize = SHA1_DIGEST_SIZE,
2972 },
2973 .caam = {
2974 .class1_alg_type = OP_ALG_ALGSEL_AES |
2975 OP_ALG_AAI_CTR_MOD128,
2976 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2977 OP_ALG_AAI_HMAC_PRECOMP,
2978 .rfc3686 = true,
2979 },
2980 },
2981 {
2982 .aead = {
2983 .base = {
2984 .cra_name = "seqiv(authenc("
2985 "hmac(sha1),rfc3686(ctr(aes))))",
2986 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2987 "rfc3686-ctr-aes-caam",
2988 .cra_blocksize = 1,
2989 },
2990 .setkey = aead_setkey,
2991 .setauthsize = aead_setauthsize,
2992 .encrypt = aead_encrypt,
2993 .decrypt = aead_decrypt,
2994 .ivsize = CTR_RFC3686_IV_SIZE,
2995 .maxauthsize = SHA1_DIGEST_SIZE,
2996 },
2997 .caam = {
2998 .class1_alg_type = OP_ALG_ALGSEL_AES |
2999 OP_ALG_AAI_CTR_MOD128,
3000 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3001 OP_ALG_AAI_HMAC_PRECOMP,
3002 .rfc3686 = true,
3003 .geniv = true,
3004 },
3005 },
3006 {
3007 .aead = {
3008 .base = {
3009 .cra_name = "authenc(hmac(sha224),"
3010 "rfc3686(ctr(aes)))",
3011 .cra_driver_name = "authenc-hmac-sha224-"
3012 "rfc3686-ctr-aes-caam",
3013 .cra_blocksize = 1,
3014 },
3015 .setkey = aead_setkey,
3016 .setauthsize = aead_setauthsize,
3017 .encrypt = aead_encrypt,
3018 .decrypt = aead_decrypt,
3019 .ivsize = CTR_RFC3686_IV_SIZE,
3020 .maxauthsize = SHA224_DIGEST_SIZE,
3021 },
3022 .caam = {
3023 .class1_alg_type = OP_ALG_ALGSEL_AES |
3024 OP_ALG_AAI_CTR_MOD128,
3025 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3026 OP_ALG_AAI_HMAC_PRECOMP,
3027 .rfc3686 = true,
3028 },
3029 },
3030 {
3031 .aead = {
3032 .base = {
3033 .cra_name = "seqiv(authenc("
3034 "hmac(sha224),rfc3686(ctr(aes))))",
3035 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
3036 "rfc3686-ctr-aes-caam",
3037 .cra_blocksize = 1,
3038 },
3039 .setkey = aead_setkey,
3040 .setauthsize = aead_setauthsize,
3041 .encrypt = aead_encrypt,
3042 .decrypt = aead_decrypt,
3043 .ivsize = CTR_RFC3686_IV_SIZE,
3044 .maxauthsize = SHA224_DIGEST_SIZE,
3045 },
3046 .caam = {
3047 .class1_alg_type = OP_ALG_ALGSEL_AES |
3048 OP_ALG_AAI_CTR_MOD128,
3049 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3050 OP_ALG_AAI_HMAC_PRECOMP,
3051 .rfc3686 = true,
3052 .geniv = true,
3053 },
3054 },
3055 {
3056 .aead = {
3057 .base = {
3058 .cra_name = "authenc(hmac(sha256),"
3059 "rfc3686(ctr(aes)))",
3060 .cra_driver_name = "authenc-hmac-sha256-"
3061 "rfc3686-ctr-aes-caam",
3062 .cra_blocksize = 1,
3063 },
3064 .setkey = aead_setkey,
3065 .setauthsize = aead_setauthsize,
3066 .encrypt = aead_encrypt,
3067 .decrypt = aead_decrypt,
3068 .ivsize = CTR_RFC3686_IV_SIZE,
3069 .maxauthsize = SHA256_DIGEST_SIZE,
3070 },
3071 .caam = {
3072 .class1_alg_type = OP_ALG_ALGSEL_AES |
3073 OP_ALG_AAI_CTR_MOD128,
3074 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3075 OP_ALG_AAI_HMAC_PRECOMP,
3076 .rfc3686 = true,
3077 },
3078 },
3079 {
3080 .aead = {
3081 .base = {
3082 .cra_name = "seqiv(authenc(hmac(sha256),"
3083 "rfc3686(ctr(aes))))",
3084 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
3085 "rfc3686-ctr-aes-caam",
3086 .cra_blocksize = 1,
3087 },
3088 .setkey = aead_setkey,
3089 .setauthsize = aead_setauthsize,
3090 .encrypt = aead_encrypt,
3091 .decrypt = aead_decrypt,
3092 .ivsize = CTR_RFC3686_IV_SIZE,
3093 .maxauthsize = SHA256_DIGEST_SIZE,
3094 },
3095 .caam = {
3096 .class1_alg_type = OP_ALG_ALGSEL_AES |
3097 OP_ALG_AAI_CTR_MOD128,
3098 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3099 OP_ALG_AAI_HMAC_PRECOMP,
3100 .rfc3686 = true,
3101 .geniv = true,
3102 },
3103 },
3104 {
3105 .aead = {
3106 .base = {
3107 .cra_name = "authenc(hmac(sha384),"
3108 "rfc3686(ctr(aes)))",
3109 .cra_driver_name = "authenc-hmac-sha384-"
3110 "rfc3686-ctr-aes-caam",
3111 .cra_blocksize = 1,
3112 },
3113 .setkey = aead_setkey,
3114 .setauthsize = aead_setauthsize,
3115 .encrypt = aead_encrypt,
3116 .decrypt = aead_decrypt,
3117 .ivsize = CTR_RFC3686_IV_SIZE,
3118 .maxauthsize = SHA384_DIGEST_SIZE,
3119 },
3120 .caam = {
3121 .class1_alg_type = OP_ALG_ALGSEL_AES |
3122 OP_ALG_AAI_CTR_MOD128,
3123 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3124 OP_ALG_AAI_HMAC_PRECOMP,
3125 .rfc3686 = true,
3126 },
3127 },
3128 {
3129 .aead = {
3130 .base = {
3131 .cra_name = "seqiv(authenc(hmac(sha384),"
3132 "rfc3686(ctr(aes))))",
3133 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
3134 "rfc3686-ctr-aes-caam",
3135 .cra_blocksize = 1,
3136 },
3137 .setkey = aead_setkey,
3138 .setauthsize = aead_setauthsize,
3139 .encrypt = aead_encrypt,
3140 .decrypt = aead_decrypt,
3141 .ivsize = CTR_RFC3686_IV_SIZE,
3142 .maxauthsize = SHA384_DIGEST_SIZE,
3143 },
3144 .caam = {
3145 .class1_alg_type = OP_ALG_ALGSEL_AES |
3146 OP_ALG_AAI_CTR_MOD128,
3147 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3148 OP_ALG_AAI_HMAC_PRECOMP,
3149 .rfc3686 = true,
3150 .geniv = true,
3151 },
3152 },
3153 {
3154 .aead = {
3155 .base = {
3156 .cra_name = "authenc(hmac(sha512),"
3157 "rfc3686(ctr(aes)))",
3158 .cra_driver_name = "authenc-hmac-sha512-"
3159 "rfc3686-ctr-aes-caam",
3160 .cra_blocksize = 1,
3161 },
3162 .setkey = aead_setkey,
3163 .setauthsize = aead_setauthsize,
3164 .encrypt = aead_encrypt,
3165 .decrypt = aead_decrypt,
3166 .ivsize = CTR_RFC3686_IV_SIZE,
3167 .maxauthsize = SHA512_DIGEST_SIZE,
3168 },
3169 .caam = {
3170 .class1_alg_type = OP_ALG_ALGSEL_AES |
3171 OP_ALG_AAI_CTR_MOD128,
3172 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3173 OP_ALG_AAI_HMAC_PRECOMP,
3174 .rfc3686 = true,
3175 },
3176 },
3177 {
3178 .aead = {
3179 .base = {
3180 .cra_name = "seqiv(authenc(hmac(sha512),"
3181 "rfc3686(ctr(aes))))",
3182 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
3183 "rfc3686-ctr-aes-caam",
3184 .cra_blocksize = 1,
3185 },
3186 .setkey = aead_setkey,
3187 .setauthsize = aead_setauthsize,
3188 .encrypt = aead_encrypt,
3189 .decrypt = aead_decrypt,
3190 .ivsize = CTR_RFC3686_IV_SIZE,
3191 .maxauthsize = SHA512_DIGEST_SIZE,
3192 },
3193 .caam = {
3194 .class1_alg_type = OP_ALG_ALGSEL_AES |
3195 OP_ALG_AAI_CTR_MOD128,
3196 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3197 OP_ALG_AAI_HMAC_PRECOMP,
3198 .rfc3686 = true,
3199 .geniv = true,
3200 },
3201 },
3202 };
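/*
 * Illustrative sketch, not part of this driver: the AEADs above are
 * consumed through the generic AEAD API, e.g. for "gcm(aes)" (done_cb/
 * priv are placeholders, error handling omitted):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  done_cb, priv);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(async: expect -EINPROGRESS)
 */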
3203
3204 struct caam_crypto_alg {
3205 struct crypto_alg crypto_alg;
3206 struct list_head entry;
3207 struct caam_alg_entry caam;
3208 };
3209
3210 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
3211 {
3212 dma_addr_t dma_addr;
3213
3214 ctx->jrdev = caam_jr_alloc();
3215 if (IS_ERR(ctx->jrdev)) {
3216 pr_err("Job Ring Device allocation for transform failed\n");
3217 return PTR_ERR(ctx->jrdev);
3218 }
3219
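	/*
	 * sh_desc_enc, sh_desc_dec, sh_desc_givenc and key sit back to back
	 * at the start of struct caam_ctx, so one mapping that ends at the
	 * first non-mapped member (sh_desc_enc_dma) covers all of them;
	 * per-member DMA addresses are derived below via offsetof().
	 */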
3220 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3221 offsetof(struct caam_ctx,
3222 sh_desc_enc_dma),
3223 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
3224 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3225 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3226 caam_jr_free(ctx->jrdev);
3227 return -ENOMEM;
3228 }
3229
3230 ctx->sh_desc_enc_dma = dma_addr;
3231 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3232 sh_desc_dec);
3233 ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
3234 sh_desc_givenc);
3235 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
3236
3237 /* copy descriptor header template value */
3238 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3239 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3240
3241 return 0;
3242 }
3243
3244 static int caam_cra_init(struct crypto_tfm *tfm)
3245 {
3246 struct crypto_alg *alg = tfm->__crt_alg;
3247 struct caam_crypto_alg *caam_alg =
3248 container_of(alg, struct caam_crypto_alg, crypto_alg);
3249 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3250
3251 return caam_init_common(ctx, &caam_alg->caam);
3252 }
3253
3254 static int caam_aead_init(struct crypto_aead *tfm)
3255 {
3256 struct aead_alg *alg = crypto_aead_alg(tfm);
3257 struct caam_aead_alg *caam_alg =
3258 container_of(alg, struct caam_aead_alg, aead);
3259 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
3260
3261 return caam_init_common(ctx, &caam_alg->caam);
3262 }
3263
3264 static void caam_exit_common(struct caam_ctx *ctx)
3265 {
3266 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3267 offsetof(struct caam_ctx, sh_desc_enc_dma),
3268 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
3269 caam_jr_free(ctx->jrdev);
3270 }
3271
3272 static void caam_cra_exit(struct crypto_tfm *tfm)
3273 {
3274 caam_exit_common(crypto_tfm_ctx(tfm));
3275 }
3276
3277 static void caam_aead_exit(struct crypto_aead *tfm)
3278 {
3279 caam_exit_common(crypto_aead_ctx(tfm));
3280 }
3281
3282 static void __exit caam_algapi_exit(void)
3283 {
3285 struct caam_crypto_alg *t_alg, *n;
3286 int i;
3287
3288 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3289 struct caam_aead_alg *t_alg = driver_aeads + i;
3290
3291 if (t_alg->registered)
3292 crypto_unregister_aead(&t_alg->aead);
3293 }
3294
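	/*
	 * alg_list.next is only non-NULL once caam_algapi_init() has called
	 * INIT_LIST_HEAD(); bail out if the list was never initialized.
	 */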
3295 if (!alg_list.next)
3296 return;
3297
3298 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
3299 crypto_unregister_alg(&t_alg->crypto_alg);
3300 list_del(&t_alg->entry);
3301 kfree(t_alg);
3302 }
3303 }
3304
3305 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
3306 *template)
3307 {
3308 struct caam_crypto_alg *t_alg;
3309 struct crypto_alg *alg;
3310
3311 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
3312 if (!t_alg) {
3313 pr_err("failed to allocate t_alg\n");
3314 return ERR_PTR(-ENOMEM);
3315 }
3316
3317 alg = &t_alg->crypto_alg;
3318
3319 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3320 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3321 template->driver_name);
3322 alg->cra_module = THIS_MODULE;
3323 alg->cra_init = caam_cra_init;
3324 alg->cra_exit = caam_cra_exit;
3325 alg->cra_priority = CAAM_CRA_PRIORITY;
3326 alg->cra_blocksize = template->blocksize;
3327 alg->cra_alignmask = 0;
3328 alg->cra_ctxsize = sizeof(struct caam_ctx);
3329 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3330 template->type;
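	/*
	 * GIVCIPHER templates provide a built-in IV generator through
	 * .givencrypt; plain ABLKCIPHER templates leave IV generation to
	 * the generic wrapper named in .geniv.
	 */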
3331 switch (template->type) {
3332 case CRYPTO_ALG_TYPE_GIVCIPHER:
3333 alg->cra_type = &crypto_givcipher_type;
3334 alg->cra_ablkcipher = template->template_ablkcipher;
3335 break;
3336 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3337 alg->cra_type = &crypto_ablkcipher_type;
3338 alg->cra_ablkcipher = template->template_ablkcipher;
3339 break;
3340 }
3341
3342 t_alg->caam.class1_alg_type = template->class1_alg_type;
3343 t_alg->caam.class2_alg_type = template->class2_alg_type;
3344
3345 return t_alg;
3346 }
3347
3348 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3349 {
3350 struct aead_alg *alg = &t_alg->aead;
3351
3352 alg->base.cra_module = THIS_MODULE;
3353 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3354 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3355 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3356
3357 alg->init = caam_aead_init;
3358 alg->exit = caam_aead_exit;
3359 }
3360
3361 static int __init caam_algapi_init(void)
3362 {
3363 struct device_node *dev_node;
3364 struct platform_device *pdev;
3365 struct device *ctrldev;
3366 struct caam_drv_private *priv;
3367 int i = 0, err = 0;
3368 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
3369 unsigned int md_limit = SHA512_DIGEST_SIZE;
3370 bool registered = false;
3371
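	/*
	 * Locate the CAAM controller node; "fsl,sec4.0" is the legacy
	 * spelling of the compatible string used by older device trees.
	 */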
3372 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3373 if (!dev_node) {
3374 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3375 if (!dev_node)
3376 return -ENODEV;
3377 }
3378
3379 pdev = of_find_device_by_node(dev_node);
3380 if (!pdev) {
3381 of_node_put(dev_node);
3382 return -ENODEV;
3383 }
3384
3385 ctrldev = &pdev->dev;
3386 priv = dev_get_drvdata(ctrldev);
3387 of_node_put(dev_node);
3388
3389 /*
3390 * If priv is NULL, it's probably because the caam driver wasn't
3391 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3392 */
3393 if (!priv)
3394 return -ENODEV;
3395
3397 INIT_LIST_HEAD(&alg_list);
3398
3399 /*
3400 * Register crypto algorithms the device supports.
3401 * First, detect presence and attributes of DES, AES, and MD blocks.
3402 */
3403 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
3404 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
3405 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
3406 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
3407 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3408
3409 /* If MD is present, limit digest size based on LP256 */
3410 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
3411 md_limit = SHA256_DIGEST_SIZE;
3412
3413 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3414 struct caam_crypto_alg *t_alg;
3415 struct caam_alg_template *alg = driver_algs + i;
3416 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
3417
3418 /* Skip DES algorithms if not supported by device */
3419 if (!des_inst &&
3420 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
3421 (alg_sel == OP_ALG_ALGSEL_DES)))
3422 continue;
3423
3424 /* Skip AES algorithms if not supported by device */
3425 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
3426 continue;
3427
3428 /*
3429 * Check support for AES modes not available
3430 * on LP devices.
3431 */
3432 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3433 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
3434 OP_ALG_AAI_XTS)
3435 continue;
3436
3437 t_alg = caam_alg_alloc(alg);
3438 if (IS_ERR(t_alg)) {
3439 err = PTR_ERR(t_alg);
3440 pr_warn("%s alg allocation failed\n", alg->driver_name);
3441 continue;
3442 }
3443
3444 err = crypto_register_alg(&t_alg->crypto_alg);
3445 if (err) {
3446 pr_warn("%s alg registration failed\n",
3447 t_alg->crypto_alg.cra_driver_name);
3448 kfree(t_alg);
3449 continue;
3450 }
3451
3452 list_add_tail(&t_alg->entry, &alg_list);
3453 registered = true;
3454 }
3455
3456 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3457 struct caam_aead_alg *t_alg = driver_aeads + i;
3458 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
3459 OP_ALG_ALGSEL_MASK;
3460 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
3461 OP_ALG_ALGSEL_MASK;
3462 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3463
3464 /* Skip DES algorithms if not supported by device */
3465 if (!des_inst &&
3466 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
3467 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
3468 continue;
3469
3470 /* Skip AES algorithms if not supported by device */
3471 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
3472 continue;
3473
3474 /*
3475 * Check support for AES algorithms not available
3476 * on LP devices.
3477 */
3478 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3479 if (alg_aai == OP_ALG_AAI_GCM)
3480 continue;
3481
3482 /*
3483 * Skip algorithms requiring message digests
3484 * if MD or MD size is not supported by device.
3485 */
3486 if (c2_alg_sel &&
3487 (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
3488 continue;
3489
3490 caam_aead_alg_init(t_alg);
3491
3492 err = crypto_register_aead(&t_alg->aead);
3493 if (err) {
3494 pr_warn("%s alg registration failed\n",
3495 t_alg->aead.base.cra_driver_name);
3496 continue;
3497 }
3498
3499 t_alg->registered = true;
3500 registered = true;
3501 }
3502
3503 if (registered)
3504 pr_info("caam algorithms registered in /proc/crypto\n");
3505
3506 return err;
3507 }
3508
3509 module_init(caam_algapi_init);
3510 module_exit(caam_algapi_exit);
3511
3512 MODULE_LICENSE("GPL");
3513 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3514 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
3515