1 /*
2 * Freescale FSL CAAM support for crypto API over QI backend.
3 * Based on caamalg.c
4 *
5 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP
7 */
8
9 #include "compat.h"
10
11 #include "regs.h"
12 #include "intern.h"
13 #include "desc_constr.h"
14 #include "error.h"
15 #include "sg_sw_qm.h"
16 #include "key_gen.h"
17 #include "qi.h"
18 #include "jr.h"
19 #include "caamalg_desc.h"
20
21 /*
22 * crypto alg
23 */
24 #define CAAM_CRA_PRIORITY 2000
25 /* max key is sum of AES_MAX_KEY_SIZE and max split key size */
26 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
27 SHA512_DIGEST_SIZE * 2)
28
29 #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
30 CAAM_MAX_KEY_SIZE)
31 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
32
33 struct caam_alg_entry {
34 int class1_alg_type;
35 int class2_alg_type;
36 bool rfc3686;
37 bool geniv;
38 };
39
40 struct caam_aead_alg {
41 struct aead_alg aead;
42 struct caam_alg_entry caam;
43 bool registered;
44 };
45
46 /*
47 * per-session context
48 */
49 struct caam_ctx {
50 struct device *jrdev;
51 u32 sh_desc_enc[DESC_MAX_USED_LEN];
52 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
54 u8 key[CAAM_MAX_KEY_SIZE];
55 dma_addr_t key_dma;
56 struct alginfo adata;
57 struct alginfo cdata;
58 unsigned int authsize;
59 struct device *qidev;
60 spinlock_t lock; /* Protects multiple init of driver context */
61 struct caam_drv_ctx *drv_ctx[NUM_OP];
62 };
63
64 static int aead_set_sh_desc(struct crypto_aead *aead)
65 {
66 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
67 typeof(*alg), aead);
68 struct caam_ctx *ctx = crypto_aead_ctx(aead);
69 unsigned int ivsize = crypto_aead_ivsize(aead);
70 u32 ctx1_iv_off = 0;
71 u32 *nonce = NULL;
72 unsigned int data_len[2];
73 u32 inl_mask;
74 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
75 OP_ALG_AAI_CTR_MOD128);
76 const bool is_rfc3686 = alg->caam.rfc3686;
77
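/* Shared descriptors can only be constructed once both the cipher key and the authsize are known */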
78 if (!ctx->cdata.keylen || !ctx->authsize)
79 return 0;
80
81 /*
82 * AES-CTR needs to load IV in CONTEXT1 reg
83 * at an offset of 128 bits (16 bytes)
84 * CONTEXT1[255:128] = IV
85 */
86 if (ctr_mode)
87 ctx1_iv_off = 16;
88
89 /*
90 * RFC3686 specific:
91 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
92 */
93 if (is_rfc3686) {
94 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
95 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
96 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
97 }
98
99 data_len[0] = ctx->adata.keylen_pad;
100 data_len[1] = ctx->cdata.keylen;
101
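/* geniv transforms get a givencrypt descriptor (built further below) in sh_desc_enc instead */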
102 if (alg->caam.geniv)
103 goto skip_enc;
104
105 /* aead_encrypt shared descriptor */
106 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
107 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
108 DESC_JOB_IO_LEN, data_len, &inl_mask,
109 ARRAY_SIZE(data_len)) < 0)
110 return -EINVAL;
111
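/* inl_mask bit 0: auth (split) key fits inline; bit 1: cipher key fits inline */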
112 if (inl_mask & 1)
113 ctx->adata.key_virt = ctx->key;
114 else
115 ctx->adata.key_dma = ctx->key_dma;
116
117 if (inl_mask & 2)
118 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
119 else
120 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
121
122 ctx->adata.key_inline = !!(inl_mask & 1);
123 ctx->cdata.key_inline = !!(inl_mask & 2);
124
125 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
126 ivsize, ctx->authsize, is_rfc3686, nonce,
127 ctx1_iv_off, true);
128
129 skip_enc:
130 /* aead_decrypt shared descriptor */
131 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
132 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
133 DESC_JOB_IO_LEN, data_len, &inl_mask,
134 ARRAY_SIZE(data_len)) < 0)
135 return -EINVAL;
136
137 if (inl_mask & 1)
138 ctx->adata.key_virt = ctx->key;
139 else
140 ctx->adata.key_dma = ctx->key_dma;
141
142 if (inl_mask & 2)
143 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
144 else
145 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
146
147 ctx->adata.key_inline = !!(inl_mask & 1);
148 ctx->cdata.key_inline = !!(inl_mask & 2);
149
150 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
151 ivsize, ctx->authsize, alg->caam.geniv,
152 is_rfc3686, nonce, ctx1_iv_off, true);
153
154 if (!alg->caam.geniv)
155 goto skip_givenc;
156
157 /* aead_givencrypt shared descriptor */
158 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
159 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
160 DESC_JOB_IO_LEN, data_len, &inl_mask,
161 ARRAY_SIZE(data_len)) < 0)
162 return -EINVAL;
163
164 if (inl_mask & 1)
165 ctx->adata.key_virt = ctx->key;
166 else
167 ctx->adata.key_dma = ctx->key_dma;
168
169 if (inl_mask & 2)
170 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
171 else
172 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
173
174 ctx->adata.key_inline = !!(inl_mask & 1);
175 ctx->cdata.key_inline = !!(inl_mask & 2);
176
177 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
178 ivsize, ctx->authsize, is_rfc3686, nonce,
179 ctx1_iv_off, true);
180
181 skip_givenc:
182 return 0;
183 }
184
185 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
186 {
187 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
188
189 ctx->authsize = authsize;
190 aead_set_sh_desc(authenc);
191
192 return 0;
193 }
194
195 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
196 unsigned int keylen)
197 {
198 struct caam_ctx *ctx = crypto_aead_ctx(aead);
199 struct device *jrdev = ctx->jrdev;
200 struct crypto_authenc_keys keys;
201 int ret = 0;
202
203 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
204 goto badkey;
205
206 #ifdef DEBUG
207 dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
208 keys.authkeylen + keys.enckeylen, keys.enckeylen,
209 keys.authkeylen);
210 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
211 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
212 #endif
213
214 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
215 keys.authkeylen, CAAM_MAX_KEY_SIZE -
216 keys.enckeylen);
217 if (ret)
218 goto badkey;
219
220 /* append encryption key to auth split key */
221 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
222 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
223 keys.enckeylen, DMA_TO_DEVICE);
224 #ifdef DEBUG
225 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
226 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
227 ctx->adata.keylen_pad + keys.enckeylen, 1);
228 #endif
229
230 ctx->cdata.keylen = keys.enckeylen;
231
232 ret = aead_set_sh_desc(aead);
233 if (ret)
234 goto badkey;
235
236 /* Now update the driver contexts with the new shared descriptor */
237 if (ctx->drv_ctx[ENCRYPT]) {
238 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
239 ctx->sh_desc_enc);
240 if (ret) {
241 dev_err(jrdev, "driver enc context update failed\n");
242 goto badkey;
243 }
244 }
245
246 if (ctx->drv_ctx[DECRYPT]) {
247 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
248 ctx->sh_desc_dec);
249 if (ret) {
250 dev_err(jrdev, "driver dec context update failed\n");
251 goto badkey;
252 }
253 }
254
255 return ret;
256 badkey:
257 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
258 return -EINVAL;
259 }
260
261 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
262 const u8 *key, unsigned int keylen)
263 {
264 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
265 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
266 const char *alg_name = crypto_tfm_alg_name(tfm);
267 struct device *jrdev = ctx->jrdev;
268 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
269 u32 ctx1_iv_off = 0;
270 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
271 OP_ALG_AAI_CTR_MOD128);
272 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
273 int ret = 0;
274
275 memcpy(ctx->key, key, keylen);
276 #ifdef DEBUG
277 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
278 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
279 #endif
280 /*
281 * AES-CTR needs to load IV in CONTEXT1 reg
282 * at an offset of 128 bits (16 bytes)
283 * CONTEXT1[255:128] = IV
284 */
285 if (ctr_mode)
286 ctx1_iv_off = 16;
287
288 /*
289 * RFC3686 specific:
290 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
291 * | *key = {KEY, NONCE}
292 */
293 if (is_rfc3686) {
294 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
295 keylen -= CTR_RFC3686_NONCE_SIZE;
296 }
297
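/* ctx->key is already DMA-mapped (ctx->key_dma); make the new key material visible to the device */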
298 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
299 ctx->cdata.keylen = keylen;
300 ctx->cdata.key_virt = ctx->key;
301 ctx->cdata.key_inline = true;
302
303 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
304 cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
305 is_rfc3686, ctx1_iv_off);
306 cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
307 is_rfc3686, ctx1_iv_off);
308 cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
309 ivsize, is_rfc3686, ctx1_iv_off);
310
311 /* Now update the driver contexts with the new shared descriptor */
312 if (ctx->drv_ctx[ENCRYPT]) {
313 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
314 ctx->sh_desc_enc);
315 if (ret) {
316 dev_err(jrdev, "driver enc context update failed\n");
317 goto badkey;
318 }
319 }
320
321 if (ctx->drv_ctx[DECRYPT]) {
322 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
323 ctx->sh_desc_dec);
324 if (ret) {
325 dev_err(jrdev, "driver dec context update failed\n");
326 goto badkey;
327 }
328 }
329
330 if (ctx->drv_ctx[GIVENCRYPT]) {
331 ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
332 ctx->sh_desc_givenc);
333 if (ret) {
334 dev_err(jrdev, "driver givenc context update failed\n");
335 goto badkey;
336 }
337 }
338
339 return ret;
340 badkey:
341 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
342 return -EINVAL;
343 }
344
345 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
346 const u8 *key, unsigned int keylen)
347 {
348 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
349 struct device *jrdev = ctx->jrdev;
350 int ret = 0;
351
352 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
353 dev_err(jrdev, "key size mismatch\n");
354 goto badkey;
355 }
356
357 memcpy(ctx->key, key, keylen);
358 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
359 ctx->cdata.keylen = keylen;
360 ctx->cdata.key_virt = ctx->key;
361 ctx->cdata.key_inline = true;
362
363 /* xts ablkcipher encrypt, decrypt shared descriptors */
364 cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
365 cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
366
367 /* Now update the driver contexts with the new shared descriptor */
368 if (ctx->drv_ctx[ENCRYPT]) {
369 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
370 ctx->sh_desc_enc);
371 if (ret) {
372 dev_err(jrdev, "driver enc context update failed\n");
373 goto badkey;
374 }
375 }
376
377 if (ctx->drv_ctx[DECRYPT]) {
378 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
379 ctx->sh_desc_dec);
380 if (ret) {
381 dev_err(jrdev, "driver dec context update failed\n");
382 goto badkey;
383 }
384 }
385
386 return ret;
387 badkey:
388 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
389 return -EINVAL;
390 }
391
392 /*
393 * aead_edesc - s/w-extended aead descriptor
394 * @src_nents: number of segments in input scatterlist
395 * @dst_nents: number of segments in output scatterlist
396 * @iv_dma: dma address of iv for checking continuity and link table
397 * @qm_sg_bytes: length of dma mapped h/w link table
398 * @qm_sg_dma: bus physical mapped address of h/w link table
399 * @assoclen: associated data length, in CAAM endianness
400 * @assoclen_dma: bus physical mapped address of req->assoclen
401 * @drv_req: driver-specific request structure
402 * @sgt: the h/w link table, followed by IV
403 */
404 struct aead_edesc {
405 int src_nents;
406 int dst_nents;
407 dma_addr_t iv_dma;
408 int qm_sg_bytes;
409 dma_addr_t qm_sg_dma;
410 unsigned int assoclen;
411 dma_addr_t assoclen_dma;
412 struct caam_drv_req drv_req;
413 struct qm_sg_entry sgt[0];
414 };
415
416 /*
417 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
418 * @src_nents: number of segments in input scatterlist
419 * @dst_nents: number of segments in output scatterlist
420 * @iv_dma: dma address of iv for checking continuity and link table
421 * @qm_sg_bytes: length of dma mapped h/w link table
422 * @qm_sg_dma: bus physical mapped address of h/w link table
423 * @drv_req: driver-specific request structure
424 * @sgt: the h/w link table, followed by IV
425 */
426 struct ablkcipher_edesc {
427 int src_nents;
428 int dst_nents;
429 dma_addr_t iv_dma;
430 int qm_sg_bytes;
431 dma_addr_t qm_sg_dma;
432 struct caam_drv_req drv_req;
433 struct qm_sg_entry sgt[0];
434 };
435
436 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
437 enum optype type)
438 {
439 /*
440 * This function is called on the fast path with values of 'type'
441 * known at compile time. Invalid arguments are not expected and
442 * thus no checks are made.
443 */
444 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
445 u32 *desc;
446
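/* Driver contexts are created lazily, with double-checked locking on ctx->lock */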
447 if (unlikely(!drv_ctx)) {
448 spin_lock(&ctx->lock);
449
450 /* Read again to check whether some other core initialized drv_ctx */
451 drv_ctx = ctx->drv_ctx[type];
452 if (!drv_ctx) {
453 int cpu;
454
455 if (type == ENCRYPT)
456 desc = ctx->sh_desc_enc;
457 else if (type == DECRYPT)
458 desc = ctx->sh_desc_dec;
459 else /* (type == GIVENCRYPT) */
460 desc = ctx->sh_desc_givenc;
461
462 cpu = smp_processor_id();
463 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
464 if (likely(!IS_ERR_OR_NULL(drv_ctx)))
465 drv_ctx->op_type = type;
466
467 ctx->drv_ctx[type] = drv_ctx;
468 }
469
470 spin_unlock(&ctx->lock);
471 }
472
473 return drv_ctx;
474 }
475
476 static void caam_unmap(struct device *dev, struct scatterlist *src,
477 struct scatterlist *dst, int src_nents,
478 int dst_nents, dma_addr_t iv_dma, int ivsize,
479 enum optype op_type, dma_addr_t qm_sg_dma,
480 int qm_sg_bytes)
481 {
482 if (dst != src) {
483 if (src_nents)
484 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
485 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
486 } else {
487 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
488 }
489
490 if (iv_dma)
491 dma_unmap_single(dev, iv_dma, ivsize,
492 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
493 DMA_TO_DEVICE);
494 if (qm_sg_bytes)
495 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
496 }
497
498 static void aead_unmap(struct device *dev,
499 struct aead_edesc *edesc,
500 struct aead_request *req)
501 {
502 struct crypto_aead *aead = crypto_aead_reqtfm(req);
503 int ivsize = crypto_aead_ivsize(aead);
504
505 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
506 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
507 edesc->qm_sg_dma, edesc->qm_sg_bytes);
508 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
509 }
510
511 static void ablkcipher_unmap(struct device *dev,
512 struct ablkcipher_edesc *edesc,
513 struct ablkcipher_request *req)
514 {
515 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
516 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
517
518 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
519 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
520 edesc->qm_sg_dma, edesc->qm_sg_bytes);
521 }
522
523 static void aead_done(struct caam_drv_req *drv_req, u32 status)
524 {
525 struct device *qidev;
526 struct aead_edesc *edesc;
527 struct aead_request *aead_req = drv_req->app_ctx;
528 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
529 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
530 int ecode = 0;
531
532 qidev = caam_ctx->qidev;
533
534 if (unlikely(status)) {
535 caam_jr_strstatus(qidev, status);
536 ecode = -EIO;
537 }
538
539 edesc = container_of(drv_req, typeof(*edesc), drv_req);
540 aead_unmap(qidev, edesc, aead_req);
541
542 aead_request_complete(aead_req, ecode);
543 qi_cache_free(edesc);
544 }
545
546 /*
547 * allocate and map the aead extended descriptor
548 */
549 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
550 bool encrypt)
551 {
552 struct crypto_aead *aead = crypto_aead_reqtfm(req);
553 struct caam_ctx *ctx = crypto_aead_ctx(aead);
554 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
555 typeof(*alg), aead);
556 struct device *qidev = ctx->qidev;
557 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
558 GFP_KERNEL : GFP_ATOMIC;
559 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
560 struct aead_edesc *edesc;
561 dma_addr_t qm_sg_dma, iv_dma = 0;
562 int ivsize = 0;
563 unsigned int authsize = ctx->authsize;
564 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
565 int in_len, out_len;
566 struct qm_sg_entry *sg_table, *fd_sgt;
567 struct caam_drv_ctx *drv_ctx;
568 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
569
570 drv_ctx = get_drv_ctx(ctx, op_type);
571 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
572 return (struct aead_edesc *)drv_ctx;
573
574 /* allocate space for base edesc, link tables and IV */
575 edesc = qi_cache_alloc(GFP_DMA | flags);
576 if (unlikely(!edesc)) {
577 dev_err(qidev, "could not allocate extended descriptor\n");
578 return ERR_PTR(-ENOMEM);
579 }
580
581 if (likely(req->src == req->dst)) {
582 src_nents = sg_nents_for_len(req->src, req->assoclen +
583 req->cryptlen +
584 (encrypt ? authsize : 0));
585 if (unlikely(src_nents < 0)) {
586 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
587 req->assoclen + req->cryptlen +
588 (encrypt ? authsize : 0));
589 qi_cache_free(edesc);
590 return ERR_PTR(src_nents);
591 }
592
593 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
594 DMA_BIDIRECTIONAL);
595 if (unlikely(!mapped_src_nents)) {
596 dev_err(qidev, "unable to map source\n");
597 qi_cache_free(edesc);
598 return ERR_PTR(-ENOMEM);
599 }
600 } else {
601 src_nents = sg_nents_for_len(req->src, req->assoclen +
602 req->cryptlen);
603 if (unlikely(src_nents < 0)) {
604 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
605 req->assoclen + req->cryptlen);
606 qi_cache_free(edesc);
607 return ERR_PTR(src_nents);
608 }
609
610 dst_nents = sg_nents_for_len(req->dst, req->assoclen +
611 req->cryptlen +
612 (encrypt ? authsize :
613 (-authsize)));
614 if (unlikely(dst_nents < 0)) {
615 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
616 req->assoclen + req->cryptlen +
617 (encrypt ? authsize : (-authsize)));
618 qi_cache_free(edesc);
619 return ERR_PTR(dst_nents);
620 }
621
622 if (src_nents) {
623 mapped_src_nents = dma_map_sg(qidev, req->src,
624 src_nents, DMA_TO_DEVICE);
625 if (unlikely(!mapped_src_nents)) {
626 dev_err(qidev, "unable to map source\n");
627 qi_cache_free(edesc);
628 return ERR_PTR(-ENOMEM);
629 }
630 } else {
631 mapped_src_nents = 0;
632 }
633
634 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
635 DMA_FROM_DEVICE);
636 if (unlikely(!mapped_dst_nents)) {
637 dev_err(qidev, "unable to map destination\n");
638 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
639 qi_cache_free(edesc);
640 return ERR_PTR(-ENOMEM);
641 }
642 }
643
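/* geniv transforms (other than RFC3686 encrypt) do not pass the IV through the S/G table */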
644 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
645 ivsize = crypto_aead_ivsize(aead);
646
647 /*
648 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
649 * Input is not contiguous.
650 */
651 qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
652 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
653 sg_table = &edesc->sgt[0];
654 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
655 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
656 CAAM_QI_MEMCACHE_SIZE)) {
657 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
658 qm_sg_ents, ivsize);
659 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
660 0, 0, 0, 0);
661 qi_cache_free(edesc);
662 return ERR_PTR(-ENOMEM);
663 }
664
665 if (ivsize) {
666 u8 *iv = (u8 *)(sg_table + qm_sg_ents);
667
668 /* Make sure IV is located in a DMAable area */
669 memcpy(iv, req->iv, ivsize);
670
671 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
672 if (dma_mapping_error(qidev, iv_dma)) {
673 dev_err(qidev, "unable to map IV\n");
674 caam_unmap(qidev, req->src, req->dst, src_nents,
675 dst_nents, 0, 0, 0, 0, 0);
676 qi_cache_free(edesc);
677 return ERR_PTR(-ENOMEM);
678 }
679 }
680
681 edesc->src_nents = src_nents;
682 edesc->dst_nents = dst_nents;
683 edesc->iv_dma = iv_dma;
684 edesc->drv_req.app_ctx = req;
685 edesc->drv_req.cbk = aead_done;
686 edesc->drv_req.drv_ctx = drv_ctx;
687
688 edesc->assoclen = cpu_to_caam32(req->assoclen);
689 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
690 DMA_TO_DEVICE);
691 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
692 dev_err(qidev, "unable to map assoclen\n");
693 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
694 iv_dma, ivsize, op_type, 0, 0);
695 qi_cache_free(edesc);
696 return ERR_PTR(-ENOMEM);
697 }
698
699 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
700 qm_sg_index++;
701 if (ivsize) {
702 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
703 qm_sg_index++;
704 }
705 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
706 qm_sg_index += mapped_src_nents;
707
708 if (mapped_dst_nents > 1)
709 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
710 qm_sg_index, 0);
711
712 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
713 if (dma_mapping_error(qidev, qm_sg_dma)) {
714 dev_err(qidev, "unable to map S/G table\n");
715 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
716 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
717 iv_dma, ivsize, op_type, 0, 0);
718 qi_cache_free(edesc);
719 return ERR_PTR(-ENOMEM);
720 }
721
722 edesc->qm_sg_dma = qm_sg_dma;
723 edesc->qm_sg_bytes = qm_sg_bytes;
724
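/* Input: 4-byte assoclen + IV + assoc data + payload; output grows (encrypt) or shrinks (decrypt) by the ICV */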
725 out_len = req->assoclen + req->cryptlen +
726 (encrypt ? ctx->authsize : (-ctx->authsize));
727 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
728
729 fd_sgt = &edesc->drv_req.fd_sgt[0];
730 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
731
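/* Output entry: point directly at a single contiguous destination, otherwise into the S/G table */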
732 if (req->dst == req->src) {
733 if (mapped_src_nents == 1)
734 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
735 out_len, 0);
736 else
737 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
738 (1 + !!ivsize) * sizeof(*sg_table),
739 out_len, 0);
740 } else if (mapped_dst_nents == 1) {
741 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
742 0);
743 } else {
744 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
745 qm_sg_index, out_len, 0);
746 }
747
748 return edesc;
749 }
750
751 static inline int aead_crypt(struct aead_request *req, bool encrypt)
752 {
753 struct aead_edesc *edesc;
754 struct crypto_aead *aead = crypto_aead_reqtfm(req);
755 struct caam_ctx *ctx = crypto_aead_ctx(aead);
756 int ret;
757
758 if (unlikely(caam_congested))
759 return -EAGAIN;
760
761 /* allocate extended descriptor */
762 edesc = aead_edesc_alloc(req, encrypt);
763 if (IS_ERR_OR_NULL(edesc))
764 return PTR_ERR(edesc);
765
766 /* Create and submit job descriptor */
767 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
768 if (!ret) {
769 ret = -EINPROGRESS;
770 } else {
771 aead_unmap(ctx->qidev, edesc, req);
772 qi_cache_free(edesc);
773 }
774
775 return ret;
776 }
777
778 static int aead_encrypt(struct aead_request *req)
779 {
780 return aead_crypt(req, true);
781 }
782
783 static int aead_decrypt(struct aead_request *req)
784 {
785 return aead_crypt(req, false);
786 }
787
788 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
789 {
790 struct ablkcipher_edesc *edesc;
791 struct ablkcipher_request *req = drv_req->app_ctx;
792 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
793 struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
794 struct device *qidev = caam_ctx->qidev;
795 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
796
797 #ifdef DEBUG
798 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
799 #endif
800
801 edesc = container_of(drv_req, typeof(*edesc), drv_req);
802
803 if (status)
804 caam_jr_strstatus(qidev, status);
805
806 #ifdef DEBUG
807 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
808 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
809 edesc->src_nents > 1 ? 100 : ivsize, 1);
810 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
811 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
812 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
813 #endif
814
815 ablkcipher_unmap(qidev, edesc, req);
816
817 /* In case an initial IV was generated, copy it into the GIVCIPHER request */
818 if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
819 u8 *iv;
820 struct skcipher_givcrypt_request *greq;
821
822 greq = container_of(req, struct skcipher_givcrypt_request,
823 creq);
824 iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
825 memcpy(greq->giv, iv, ivsize);
826 }
827
828 /*
829 * The crypto API expects us to set the IV (req->info) to the last
830 * ciphertext block. This is used e.g. by the CTS mode.
831 */
832 if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
833 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
834 ivsize, ivsize, 0);
835
836 qi_cache_free(edesc);
837 ablkcipher_request_complete(req, status);
838 }
839
840 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
841 *req, bool encrypt)
842 {
843 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
844 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
845 struct device *qidev = ctx->qidev;
846 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
847 GFP_KERNEL : GFP_ATOMIC;
848 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
849 struct ablkcipher_edesc *edesc;
850 dma_addr_t iv_dma;
851 u8 *iv;
852 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
853 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
854 struct qm_sg_entry *sg_table, *fd_sgt;
855 struct caam_drv_ctx *drv_ctx;
856 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
857
858 drv_ctx = get_drv_ctx(ctx, op_type);
859 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
860 return (struct ablkcipher_edesc *)drv_ctx;
861
862 src_nents = sg_nents_for_len(req->src, req->nbytes);
863 if (unlikely(src_nents < 0)) {
864 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
865 req->nbytes);
866 return ERR_PTR(src_nents);
867 }
868
869 if (unlikely(req->src != req->dst)) {
870 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
871 if (unlikely(dst_nents < 0)) {
872 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
873 req->nbytes);
874 return ERR_PTR(dst_nents);
875 }
876
877 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
878 DMA_TO_DEVICE);
879 if (unlikely(!mapped_src_nents)) {
880 dev_err(qidev, "unable to map source\n");
881 return ERR_PTR(-ENOMEM);
882 }
883
884 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
885 DMA_FROM_DEVICE);
886 if (unlikely(!mapped_dst_nents)) {
887 dev_err(qidev, "unable to map destination\n");
888 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
889 return ERR_PTR(-ENOMEM);
890 }
891 } else {
892 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
893 DMA_BIDIRECTIONAL);
894 if (unlikely(!mapped_src_nents)) {
895 dev_err(qidev, "unable to map source\n");
896 return ERR_PTR(-ENOMEM);
897 }
898 }
899
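/* S/G table: one IV entry plus the source entries; destination entries follow only if dst is scattered */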
900 qm_sg_ents = 1 + mapped_src_nents;
901 dst_sg_idx = qm_sg_ents;
902
903 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
904 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
905 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
906 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
907 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
908 qm_sg_ents, ivsize);
909 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
910 0, 0, 0, 0);
911 return ERR_PTR(-ENOMEM);
912 }
913
914 /* allocate space for base edesc, link tables and IV */
915 edesc = qi_cache_alloc(GFP_DMA | flags);
916 if (unlikely(!edesc)) {
917 dev_err(qidev, "could not allocate extended descriptor\n");
918 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
919 0, 0, 0, 0);
920 return ERR_PTR(-ENOMEM);
921 }
922
923 /* Make sure IV is located in a DMAable area */
924 sg_table = &edesc->sgt[0];
925 iv = (u8 *)(sg_table + qm_sg_ents);
926 memcpy(iv, req->info, ivsize);
927
928 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
929 if (dma_mapping_error(qidev, iv_dma)) {
930 dev_err(qidev, "unable to map IV\n");
931 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
932 0, 0, 0, 0);
933 qi_cache_free(edesc);
934 return ERR_PTR(-ENOMEM);
935 }
936
937 edesc->src_nents = src_nents;
938 edesc->dst_nents = dst_nents;
939 edesc->iv_dma = iv_dma;
940 edesc->qm_sg_bytes = qm_sg_bytes;
941 edesc->drv_req.app_ctx = req;
942 edesc->drv_req.cbk = ablkcipher_done;
943 edesc->drv_req.drv_ctx = drv_ctx;
944
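/* Fill the S/G table: IV first, then the source, then (if scattered) the destination */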
945 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
946 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
947
948 if (mapped_dst_nents > 1)
949 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
950 dst_sg_idx, 0);
951
952 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
953 DMA_TO_DEVICE);
954 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
955 dev_err(qidev, "unable to map S/G table\n");
956 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
957 iv_dma, ivsize, op_type, 0, 0);
958 qi_cache_free(edesc);
959 return ERR_PTR(-ENOMEM);
960 }
961
962 fd_sgt = &edesc->drv_req.fd_sgt[0];
963
964 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
965 ivsize + req->nbytes, 0);
966
967 if (req->src == req->dst) {
968 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
969 sizeof(*sg_table), req->nbytes, 0);
970 } else if (mapped_dst_nents > 1) {
971 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
972 sizeof(*sg_table), req->nbytes, 0);
973 } else {
974 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
975 req->nbytes, 0);
976 }
977
978 return edesc;
979 }
980
981 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
982 struct skcipher_givcrypt_request *creq)
983 {
984 struct ablkcipher_request *req = &creq->creq;
985 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
986 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
987 struct device *qidev = ctx->qidev;
988 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
989 GFP_KERNEL : GFP_ATOMIC;
990 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
991 struct ablkcipher_edesc *edesc;
992 dma_addr_t iv_dma;
993 u8 *iv;
994 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
995 struct qm_sg_entry *sg_table, *fd_sgt;
996 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
997 struct caam_drv_ctx *drv_ctx;
998
999 drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1000 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1001 return (struct ablkcipher_edesc *)drv_ctx;
1002
1003 src_nents = sg_nents_for_len(req->src, req->nbytes);
1004 if (unlikely(src_nents < 0)) {
1005 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1006 req->nbytes);
1007 return ERR_PTR(src_nents);
1008 }
1009
1010 if (unlikely(req->src != req->dst)) {
1011 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1012 if (unlikely(dst_nents < 0)) {
1013 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1014 req->nbytes);
1015 return ERR_PTR(dst_nents);
1016 }
1017
1018 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1019 DMA_TO_DEVICE);
1020 if (unlikely(!mapped_src_nents)) {
1021 dev_err(qidev, "unable to map source\n");
1022 return ERR_PTR(-ENOMEM);
1023 }
1024
1025 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1026 DMA_FROM_DEVICE);
1027 if (unlikely(!mapped_dst_nents)) {
1028 dev_err(qidev, "unable to map destination\n");
1029 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1030 return ERR_PTR(-ENOMEM);
1031 }
1032 } else {
1033 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1034 DMA_BIDIRECTIONAL);
1035 if (unlikely(!mapped_src_nents)) {
1036 dev_err(qidev, "unable to map source\n");
1037 return ERR_PTR(-ENOMEM);
1038 }
1039
1040 dst_nents = src_nents;
1041 mapped_dst_nents = src_nents;
1042 }
1043
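/* S/G table layout: source entries (only if scattered), then the generated IV, then the destination */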
1044 qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1045 dst_sg_idx = qm_sg_ents;
1046
1047 qm_sg_ents += 1 + mapped_dst_nents;
1048 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1049 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1050 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1051 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1052 qm_sg_ents, ivsize);
1053 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1054 0, 0, 0, 0);
1055 return ERR_PTR(-ENOMEM);
1056 }
1057
1058 /* allocate space for base edesc, link tables and IV */
1059 edesc = qi_cache_alloc(GFP_DMA | flags);
1060 if (!edesc) {
1061 dev_err(qidev, "could not allocate extended descriptor\n");
1062 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1063 0, 0, 0, 0);
1064 return ERR_PTR(-ENOMEM);
1065 }
1066
1067 /* Make sure IV is located in a DMAable area */
1068 sg_table = &edesc->sgt[0];
1069 iv = (u8 *)(sg_table + qm_sg_ents);
1070 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1071 if (dma_mapping_error(qidev, iv_dma)) {
1072 dev_err(qidev, "unable to map IV\n");
1073 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1074 0, 0, 0, 0);
1075 qi_cache_free(edesc);
1076 return ERR_PTR(-ENOMEM);
1077 }
1078
1079 edesc->src_nents = src_nents;
1080 edesc->dst_nents = dst_nents;
1081 edesc->iv_dma = iv_dma;
1082 edesc->qm_sg_bytes = qm_sg_bytes;
1083 edesc->drv_req.app_ctx = req;
1084 edesc->drv_req.cbk = ablkcipher_done;
1085 edesc->drv_req.drv_ctx = drv_ctx;
1086
1087 if (mapped_src_nents > 1)
1088 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1089
1090 dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1091 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1092 0);
1093
1094 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1095 DMA_TO_DEVICE);
1096 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1097 dev_err(qidev, "unable to map S/G table\n");
1098 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1099 iv_dma, ivsize, GIVENCRYPT, 0, 0);
1100 qi_cache_free(edesc);
1101 return ERR_PTR(-ENOMEM);
1102 }
1103
1104 fd_sgt = &edesc->drv_req.fd_sgt[0];
1105
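/* fd_sgt[1] describes the source; fd_sgt[0] covers the generated IV followed by the destination */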
1106 if (mapped_src_nents > 1)
1107 dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1108 0);
1109 else
1110 dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1111 req->nbytes, 0);
1112
1113 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1114 sizeof(*sg_table), ivsize + req->nbytes, 0);
1115
1116 return edesc;
1117 }
1118
1119 static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1120 {
1121 struct ablkcipher_edesc *edesc;
1122 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1123 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1124 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1125 int ret;
1126
1127 if (unlikely(caam_congested))
1128 return -EAGAIN;
1129
1130 /* allocate extended descriptor */
1131 edesc = ablkcipher_edesc_alloc(req, encrypt);
1132 if (IS_ERR(edesc))
1133 return PTR_ERR(edesc);
1134
1135 /*
1136 * The crypto API expects us to set the IV (req->info) to the last
1137 * ciphertext block.
1138 */
1139 if (!encrypt)
1140 scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1141 ivsize, ivsize, 0);
1142
1143 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1144 if (!ret) {
1145 ret = -EINPROGRESS;
1146 } else {
1147 ablkcipher_unmap(ctx->qidev, edesc, req);
1148 qi_cache_free(edesc);
1149 }
1150
1151 return ret;
1152 }
1153
1154 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1155 {
1156 return ablkcipher_crypt(req, true);
1157 }
1158
1159 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1160 {
1161 return ablkcipher_crypt(req, false);
1162 }
1163
1164 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1165 {
1166 struct ablkcipher_request *req = &creq->creq;
1167 struct ablkcipher_edesc *edesc;
1168 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1169 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1170 int ret;
1171
1172 if (unlikely(caam_congested))
1173 return -EAGAIN;
1174
1175 /* allocate extended descriptor */
1176 edesc = ablkcipher_giv_edesc_alloc(creq);
1177 if (IS_ERR(edesc))
1178 return PTR_ERR(edesc);
1179
1180 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1181 if (!ret) {
1182 ret = -EINPROGRESS;
1183 } else {
1184 ablkcipher_unmap(ctx->qidev, edesc, req);
1185 qi_cache_free(edesc);
1186 }
1187
1188 return ret;
1189 }
1190
1191 #define template_ablkcipher template_u.ablkcipher
1192 struct caam_alg_template {
1193 char name[CRYPTO_MAX_ALG_NAME];
1194 char driver_name[CRYPTO_MAX_ALG_NAME];
1195 unsigned int blocksize;
1196 u32 type;
1197 union {
1198 struct ablkcipher_alg ablkcipher;
1199 } template_u;
1200 u32 class1_alg_type;
1201 u32 class2_alg_type;
1202 };
1203
1204 static struct caam_alg_template driver_algs[] = {
1205 /* ablkcipher descriptor */
1206 {
1207 .name = "cbc(aes)",
1208 .driver_name = "cbc-aes-caam-qi",
1209 .blocksize = AES_BLOCK_SIZE,
1210 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1211 .template_ablkcipher = {
1212 .setkey = ablkcipher_setkey,
1213 .encrypt = ablkcipher_encrypt,
1214 .decrypt = ablkcipher_decrypt,
1215 .givencrypt = ablkcipher_givencrypt,
1216 .geniv = "<built-in>",
1217 .min_keysize = AES_MIN_KEY_SIZE,
1218 .max_keysize = AES_MAX_KEY_SIZE,
1219 .ivsize = AES_BLOCK_SIZE,
1220 },
1221 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1222 },
1223 {
1224 .name = "cbc(des3_ede)",
1225 .driver_name = "cbc-3des-caam-qi",
1226 .blocksize = DES3_EDE_BLOCK_SIZE,
1227 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1228 .template_ablkcipher = {
1229 .setkey = ablkcipher_setkey,
1230 .encrypt = ablkcipher_encrypt,
1231 .decrypt = ablkcipher_decrypt,
1232 .givencrypt = ablkcipher_givencrypt,
1233 .geniv = "<built-in>",
1234 .min_keysize = DES3_EDE_KEY_SIZE,
1235 .max_keysize = DES3_EDE_KEY_SIZE,
1236 .ivsize = DES3_EDE_BLOCK_SIZE,
1237 },
1238 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1239 },
1240 {
1241 .name = "cbc(des)",
1242 .driver_name = "cbc-des-caam-qi",
1243 .blocksize = DES_BLOCK_SIZE,
1244 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1245 .template_ablkcipher = {
1246 .setkey = ablkcipher_setkey,
1247 .encrypt = ablkcipher_encrypt,
1248 .decrypt = ablkcipher_decrypt,
1249 .givencrypt = ablkcipher_givencrypt,
1250 .geniv = "<built-in>",
1251 .min_keysize = DES_KEY_SIZE,
1252 .max_keysize = DES_KEY_SIZE,
1253 .ivsize = DES_BLOCK_SIZE,
1254 },
1255 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1256 },
1257 {
1258 .name = "ctr(aes)",
1259 .driver_name = "ctr-aes-caam-qi",
1260 .blocksize = 1,
1261 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1262 .template_ablkcipher = {
1263 .setkey = ablkcipher_setkey,
1264 .encrypt = ablkcipher_encrypt,
1265 .decrypt = ablkcipher_decrypt,
1266 .geniv = "chainiv",
1267 .min_keysize = AES_MIN_KEY_SIZE,
1268 .max_keysize = AES_MAX_KEY_SIZE,
1269 .ivsize = AES_BLOCK_SIZE,
1270 },
1271 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1272 },
1273 {
1274 .name = "rfc3686(ctr(aes))",
1275 .driver_name = "rfc3686-ctr-aes-caam-qi",
1276 .blocksize = 1,
1277 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1278 .template_ablkcipher = {
1279 .setkey = ablkcipher_setkey,
1280 .encrypt = ablkcipher_encrypt,
1281 .decrypt = ablkcipher_decrypt,
1282 .givencrypt = ablkcipher_givencrypt,
1283 .geniv = "<built-in>",
1284 .min_keysize = AES_MIN_KEY_SIZE +
1285 CTR_RFC3686_NONCE_SIZE,
1286 .max_keysize = AES_MAX_KEY_SIZE +
1287 CTR_RFC3686_NONCE_SIZE,
1288 .ivsize = CTR_RFC3686_IV_SIZE,
1289 },
1290 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1291 },
1292 {
1293 .name = "xts(aes)",
1294 .driver_name = "xts-aes-caam-qi",
1295 .blocksize = AES_BLOCK_SIZE,
1296 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1297 .template_ablkcipher = {
1298 .setkey = xts_ablkcipher_setkey,
1299 .encrypt = ablkcipher_encrypt,
1300 .decrypt = ablkcipher_decrypt,
1301 .geniv = "eseqiv",
1302 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1303 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1304 .ivsize = AES_BLOCK_SIZE,
1305 },
1306 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1307 },
1308 };
1309
1310 static struct caam_aead_alg driver_aeads[] = {
1311 /* single-pass ipsec_esp descriptor */
1312 {
1313 .aead = {
1314 .base = {
1315 .cra_name = "authenc(hmac(md5),cbc(aes))",
1316 .cra_driver_name = "authenc-hmac-md5-"
1317 "cbc-aes-caam-qi",
1318 .cra_blocksize = AES_BLOCK_SIZE,
1319 },
1320 .setkey = aead_setkey,
1321 .setauthsize = aead_setauthsize,
1322 .encrypt = aead_encrypt,
1323 .decrypt = aead_decrypt,
1324 .ivsize = AES_BLOCK_SIZE,
1325 .maxauthsize = MD5_DIGEST_SIZE,
1326 },
1327 .caam = {
1328 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1329 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1330 OP_ALG_AAI_HMAC_PRECOMP,
1331 }
1332 },
1333 {
1334 .aead = {
1335 .base = {
1336 .cra_name = "echainiv(authenc(hmac(md5),"
1337 "cbc(aes)))",
1338 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1339 "cbc-aes-caam-qi",
1340 .cra_blocksize = AES_BLOCK_SIZE,
1341 },
1342 .setkey = aead_setkey,
1343 .setauthsize = aead_setauthsize,
1344 .encrypt = aead_encrypt,
1345 .decrypt = aead_decrypt,
1346 .ivsize = AES_BLOCK_SIZE,
1347 .maxauthsize = MD5_DIGEST_SIZE,
1348 },
1349 .caam = {
1350 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1351 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1352 OP_ALG_AAI_HMAC_PRECOMP,
1353 .geniv = true,
1354 }
1355 },
1356 {
1357 .aead = {
1358 .base = {
1359 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1360 .cra_driver_name = "authenc-hmac-sha1-"
1361 "cbc-aes-caam-qi",
1362 .cra_blocksize = AES_BLOCK_SIZE,
1363 },
1364 .setkey = aead_setkey,
1365 .setauthsize = aead_setauthsize,
1366 .encrypt = aead_encrypt,
1367 .decrypt = aead_decrypt,
1368 .ivsize = AES_BLOCK_SIZE,
1369 .maxauthsize = SHA1_DIGEST_SIZE,
1370 },
1371 .caam = {
1372 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1373 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1374 OP_ALG_AAI_HMAC_PRECOMP,
1375 }
1376 },
1377 {
1378 .aead = {
1379 .base = {
1380 .cra_name = "echainiv(authenc(hmac(sha1),"
1381 "cbc(aes)))",
1382 .cra_driver_name = "echainiv-authenc-"
1383 "hmac-sha1-cbc-aes-caam-qi",
1384 .cra_blocksize = AES_BLOCK_SIZE,
1385 },
1386 .setkey = aead_setkey,
1387 .setauthsize = aead_setauthsize,
1388 .encrypt = aead_encrypt,
1389 .decrypt = aead_decrypt,
1390 .ivsize = AES_BLOCK_SIZE,
1391 .maxauthsize = SHA1_DIGEST_SIZE,
1392 },
1393 .caam = {
1394 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1395 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1396 OP_ALG_AAI_HMAC_PRECOMP,
1397 .geniv = true,
1398 },
1399 },
1400 {
1401 .aead = {
1402 .base = {
1403 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1404 .cra_driver_name = "authenc-hmac-sha224-"
1405 "cbc-aes-caam-qi",
1406 .cra_blocksize = AES_BLOCK_SIZE,
1407 },
1408 .setkey = aead_setkey,
1409 .setauthsize = aead_setauthsize,
1410 .encrypt = aead_encrypt,
1411 .decrypt = aead_decrypt,
1412 .ivsize = AES_BLOCK_SIZE,
1413 .maxauthsize = SHA224_DIGEST_SIZE,
1414 },
1415 .caam = {
1416 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1417 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1418 OP_ALG_AAI_HMAC_PRECOMP,
1419 }
1420 },
1421 {
1422 .aead = {
1423 .base = {
1424 .cra_name = "echainiv(authenc(hmac(sha224),"
1425 "cbc(aes)))",
1426 .cra_driver_name = "echainiv-authenc-"
1427 "hmac-sha224-cbc-aes-caam-qi",
1428 .cra_blocksize = AES_BLOCK_SIZE,
1429 },
1430 .setkey = aead_setkey,
1431 .setauthsize = aead_setauthsize,
1432 .encrypt = aead_encrypt,
1433 .decrypt = aead_decrypt,
1434 .ivsize = AES_BLOCK_SIZE,
1435 .maxauthsize = SHA224_DIGEST_SIZE,
1436 },
1437 .caam = {
1438 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1439 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1440 OP_ALG_AAI_HMAC_PRECOMP,
1441 .geniv = true,
1442 }
1443 },
1444 {
1445 .aead = {
1446 .base = {
1447 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1448 .cra_driver_name = "authenc-hmac-sha256-"
1449 "cbc-aes-caam-qi",
1450 .cra_blocksize = AES_BLOCK_SIZE,
1451 },
1452 .setkey = aead_setkey,
1453 .setauthsize = aead_setauthsize,
1454 .encrypt = aead_encrypt,
1455 .decrypt = aead_decrypt,
1456 .ivsize = AES_BLOCK_SIZE,
1457 .maxauthsize = SHA256_DIGEST_SIZE,
1458 },
1459 .caam = {
1460 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1461 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1462 OP_ALG_AAI_HMAC_PRECOMP,
1463 }
1464 },
1465 {
1466 .aead = {
1467 .base = {
1468 .cra_name = "echainiv(authenc(hmac(sha256),"
1469 "cbc(aes)))",
1470 .cra_driver_name = "echainiv-authenc-"
1471 "hmac-sha256-cbc-aes-"
1472 "caam-qi",
1473 .cra_blocksize = AES_BLOCK_SIZE,
1474 },
1475 .setkey = aead_setkey,
1476 .setauthsize = aead_setauthsize,
1477 .encrypt = aead_encrypt,
1478 .decrypt = aead_decrypt,
1479 .ivsize = AES_BLOCK_SIZE,
1480 .maxauthsize = SHA256_DIGEST_SIZE,
1481 },
1482 .caam = {
1483 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1484 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1485 OP_ALG_AAI_HMAC_PRECOMP,
1486 .geniv = true,
1487 }
1488 },
1489 {
1490 .aead = {
1491 .base = {
1492 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1493 .cra_driver_name = "authenc-hmac-sha384-"
1494 "cbc-aes-caam-qi",
1495 .cra_blocksize = AES_BLOCK_SIZE,
1496 },
1497 .setkey = aead_setkey,
1498 .setauthsize = aead_setauthsize,
1499 .encrypt = aead_encrypt,
1500 .decrypt = aead_decrypt,
1501 .ivsize = AES_BLOCK_SIZE,
1502 .maxauthsize = SHA384_DIGEST_SIZE,
1503 },
1504 .caam = {
1505 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1506 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1507 OP_ALG_AAI_HMAC_PRECOMP,
1508 }
1509 },
1510 {
1511 .aead = {
1512 .base = {
1513 .cra_name = "echainiv(authenc(hmac(sha384),"
1514 "cbc(aes)))",
1515 .cra_driver_name = "echainiv-authenc-"
1516 "hmac-sha384-cbc-aes-"
1517 "caam-qi",
1518 .cra_blocksize = AES_BLOCK_SIZE,
1519 },
1520 .setkey = aead_setkey,
1521 .setauthsize = aead_setauthsize,
1522 .encrypt = aead_encrypt,
1523 .decrypt = aead_decrypt,
1524 .ivsize = AES_BLOCK_SIZE,
1525 .maxauthsize = SHA384_DIGEST_SIZE,
1526 },
1527 .caam = {
1528 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1529 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1530 OP_ALG_AAI_HMAC_PRECOMP,
1531 .geniv = true,
1532 }
1533 },
1534 {
1535 .aead = {
1536 .base = {
1537 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1538 .cra_driver_name = "authenc-hmac-sha512-"
1539 "cbc-aes-caam-qi",
1540 .cra_blocksize = AES_BLOCK_SIZE,
1541 },
1542 .setkey = aead_setkey,
1543 .setauthsize = aead_setauthsize,
1544 .encrypt = aead_encrypt,
1545 .decrypt = aead_decrypt,
1546 .ivsize = AES_BLOCK_SIZE,
1547 .maxauthsize = SHA512_DIGEST_SIZE,
1548 },
1549 .caam = {
1550 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1551 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1552 OP_ALG_AAI_HMAC_PRECOMP,
1553 }
1554 },
1555 {
1556 .aead = {
1557 .base = {
1558 .cra_name = "echainiv(authenc(hmac(sha512),"
1559 "cbc(aes)))",
1560 .cra_driver_name = "echainiv-authenc-"
1561 "hmac-sha512-cbc-aes-"
1562 "caam-qi",
1563 .cra_blocksize = AES_BLOCK_SIZE,
1564 },
1565 .setkey = aead_setkey,
1566 .setauthsize = aead_setauthsize,
1567 .encrypt = aead_encrypt,
1568 .decrypt = aead_decrypt,
1569 .ivsize = AES_BLOCK_SIZE,
1570 .maxauthsize = SHA512_DIGEST_SIZE,
1571 },
1572 .caam = {
1573 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1574 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1575 OP_ALG_AAI_HMAC_PRECOMP,
1576 .geniv = true,
1577 }
1578 },
1579 {
1580 .aead = {
1581 .base = {
1582 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1583 .cra_driver_name = "authenc-hmac-md5-"
1584 "cbc-des3_ede-caam-qi",
1585 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1586 },
1587 .setkey = aead_setkey,
1588 .setauthsize = aead_setauthsize,
1589 .encrypt = aead_encrypt,
1590 .decrypt = aead_decrypt,
1591 .ivsize = DES3_EDE_BLOCK_SIZE,
1592 .maxauthsize = MD5_DIGEST_SIZE,
1593 },
1594 .caam = {
1595 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1596 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1597 OP_ALG_AAI_HMAC_PRECOMP,
1598 }
1599 },
1600 {
1601 .aead = {
1602 .base = {
1603 .cra_name = "echainiv(authenc(hmac(md5),"
1604 "cbc(des3_ede)))",
1605 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1606 "cbc-des3_ede-caam-qi",
1607 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1608 },
1609 .setkey = aead_setkey,
1610 .setauthsize = aead_setauthsize,
1611 .encrypt = aead_encrypt,
1612 .decrypt = aead_decrypt,
1613 .ivsize = DES3_EDE_BLOCK_SIZE,
1614 .maxauthsize = MD5_DIGEST_SIZE,
1615 },
1616 .caam = {
1617 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1618 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1619 OP_ALG_AAI_HMAC_PRECOMP,
1620 .geniv = true,
1621 }
1622 },
1623 {
1624 .aead = {
1625 .base = {
1626 .cra_name = "authenc(hmac(sha1),"
1627 "cbc(des3_ede))",
1628 .cra_driver_name = "authenc-hmac-sha1-"
1629 "cbc-des3_ede-caam-qi",
1630 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1631 },
1632 .setkey = aead_setkey,
1633 .setauthsize = aead_setauthsize,
1634 .encrypt = aead_encrypt,
1635 .decrypt = aead_decrypt,
1636 .ivsize = DES3_EDE_BLOCK_SIZE,
1637 .maxauthsize = SHA1_DIGEST_SIZE,
1638 },
1639 .caam = {
1640 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1641 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1642 OP_ALG_AAI_HMAC_PRECOMP,
1643 },
1644 },
1645 {
1646 .aead = {
1647 .base = {
1648 .cra_name = "echainiv(authenc(hmac(sha1),"
1649 "cbc(des3_ede)))",
1650 .cra_driver_name = "echainiv-authenc-"
1651 "hmac-sha1-"
1652 "cbc-des3_ede-caam-qi",
1653 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1654 },
1655 .setkey = aead_setkey,
1656 .setauthsize = aead_setauthsize,
1657 .encrypt = aead_encrypt,
1658 .decrypt = aead_decrypt,
1659 .ivsize = DES3_EDE_BLOCK_SIZE,
1660 .maxauthsize = SHA1_DIGEST_SIZE,
1661 },
1662 .caam = {
1663 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1664 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1665 OP_ALG_AAI_HMAC_PRECOMP,
1666 .geniv = true,
1667 }
1668 },
1669 {
1670 .aead = {
1671 .base = {
1672 .cra_name = "authenc(hmac(sha224),"
1673 "cbc(des3_ede))",
1674 .cra_driver_name = "authenc-hmac-sha224-"
1675 "cbc-des3_ede-caam-qi",
1676 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1677 },
1678 .setkey = aead_setkey,
1679 .setauthsize = aead_setauthsize,
1680 .encrypt = aead_encrypt,
1681 .decrypt = aead_decrypt,
1682 .ivsize = DES3_EDE_BLOCK_SIZE,
1683 .maxauthsize = SHA224_DIGEST_SIZE,
1684 },
1685 .caam = {
1686 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1687 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1688 OP_ALG_AAI_HMAC_PRECOMP,
1689 },
1690 },
1691 {
1692 .aead = {
1693 .base = {
1694 .cra_name = "echainiv(authenc(hmac(sha224),"
1695 "cbc(des3_ede)))",
1696 .cra_driver_name = "echainiv-authenc-"
1697 "hmac-sha224-"
1698 "cbc-des3_ede-caam-qi",
1699 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1700 },
1701 .setkey = aead_setkey,
1702 .setauthsize = aead_setauthsize,
1703 .encrypt = aead_encrypt,
1704 .decrypt = aead_decrypt,
1705 .ivsize = DES3_EDE_BLOCK_SIZE,
1706 .maxauthsize = SHA224_DIGEST_SIZE,
1707 },
1708 .caam = {
1709 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1710 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1711 OP_ALG_AAI_HMAC_PRECOMP,
1712 .geniv = true,
1713 }
1714 },
1715 {
1716 .aead = {
1717 .base = {
1718 .cra_name = "authenc(hmac(sha256),"
1719 "cbc(des3_ede))",
1720 .cra_driver_name = "authenc-hmac-sha256-"
1721 "cbc-des3_ede-caam-qi",
1722 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1723 },
1724 .setkey = aead_setkey,
1725 .setauthsize = aead_setauthsize,
1726 .encrypt = aead_encrypt,
1727 .decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};

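/*
 * Wrapper pairing a generic crypto_alg with its CAAM-specific parameters.
 * Instances are allocated at module init, tracked in alg_list and freed
 * again when the module is removed.
 */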
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};

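/*
 * Common per-tfm initialization: allocate a job ring device, map the key
 * buffer for DMA and record the descriptor header templates taken from
 * the algorithm entry.
 */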
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}

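/* .cra_init hook for the ablkcipher/givcipher algorithms built from driver_algs */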
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

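/* .init hook for the AEAD algorithms in driver_aeads */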
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

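/*
 * Common per-tfm teardown: release the per-operation driver contexts,
 * unmap the key buffer and return the job ring device.
 */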
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
			 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

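/* ablkcipher/givcipher algorithms allocated in caam_qi_algapi_init() */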
static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

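/*
 * Allocate and fill a caam_crypto_alg for one entry of driver_algs,
 * wiring the generic crypto_alg callbacks to the CAAM-QI implementation.
 */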
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

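/* Fill in the static fields shared by all AEAD algorithms before registration */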
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

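/*
 * Module init: locate the CAAM controller, check which accelerators are
 * instantiated and register only the algorithms the hardware supports.
 */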
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");