/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |       |----->| (operation) |
 *       .              |       |      | (load ctx2) |
 *       .              |       |      ---------------
 * ---------------      |       |
 * | JobDesc #3  |------|       |
 * | *(packet 3) |              |
 * ---------------              |
 *       .                      |
 *       .                      |
 * ---------------              |
 * | JobDesc #4  |--------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
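
/*
 * In code terms, a job descriptor of that shape is assembled with the
 * desc_constr.h helpers used throughout this file. A minimal sketch,
 * illustrative only (the real construction sites are the ahash_*
 * functions below):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, nbytes, options);
 */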

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
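
/*
 * The HASH_MSG_LEN bytes beyond the running digest hold the 64-bit
 * message length the MDHA keeps alongside its internal state, which is
 * what allows a saved context to be reloaded and resumed mid-stream.
 */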

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
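/*
 * buf_0/buf_1 form a ping-pong pair: current_buf selects the buffer
 * holding the partial block carried over from the previous request,
 * while the other one collects the tail of the current request; they
 * swap roles on every update that actually submits data to the engine.
 */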
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data. Even when
 * empty, a previously used buffer may still be DMA-mapped and must be
 * unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
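/*
 * Build the five shared descriptors (update, update_first, final, finup,
 * digest) and DMA-map each of them; they stay resident for the lifetime
 * of the tfm and are rebuilt whenever the key changes.
 */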
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key if it is too long, reducing it to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
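/*
 * As with software HMAC (RFC 2104), a key longer than the block size is
 * first digested down to digestsize bytes via hash_digest_key() above,
 * and the MDHA split key is then derived from the result.
 */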
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
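
/*
 * hw_desc[] is a zero-length (flexible) trailing array: each edesc is
 * carved out of a single kzalloc() of sizeof(*edesc) + DESC_JOB_IO_LEN
 * + sec4_sg_bytes, so the job descriptor and its link table travel
 * with the edesc and are freed together.
 */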

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
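/*
 * In all four completion callbacks below, the job ring hands back only
 * the descriptor pointer, so the enclosing edesc is recovered by
 * stepping back over offsetof(struct ahash_edesc, hw_desc), an
 * open-coded container_of() on the hw_desc member.
 */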
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
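/*
 * Only whole multiples of the block size are sent to the engine: the
 * remainder (in_len & (blocksize - 1)) is copied into next_buf and
 * prepended to the next request, so intermediate updates always hash
 * full blocks.
 */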
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

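/*
 * One-shot digest: there is no context import/export, so the request's
 * scatterlist feeds SEQ IN directly when it has a single segment, or
 * through a link table otherwise.
 */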
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes);
	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_bytes = (1 + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

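/*
 * finup on the very first request has no prior context to import, so
 * it reduces to a one-shot digest.
 */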
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

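/*
 * The update/final/finup hooks form a small state machine: init installs
 * the "first"/no-ctx handlers, and the first update that actually submits
 * data to the engine promotes them to the ctx-based variants, which
 * import the running context saved by the previous job.
 */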
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

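/*
 * export/import serialize the s/w side of the hash state (partial block,
 * saved CAAM context and the current update/final/finup handlers) into
 * struct caam_export_state, whose size is advertised as .statesize in
 * the templates below.
 */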
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
1603
1604 struct caam_hash_template {
1605 char name[CRYPTO_MAX_ALG_NAME];
1606 char driver_name[CRYPTO_MAX_ALG_NAME];
1607 char hmac_name[CRYPTO_MAX_ALG_NAME];
1608 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1609 unsigned int blocksize;
1610 struct ahash_alg template_ahash;
1611 u32 alg_type;
1612 u32 alg_op;
1613 };
1614
/* ahash transform templates */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
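
/*
 * Each template above is instantiated twice at init time: once as the keyed
 * "hmac(<alg>)" transform and once as the plain "<alg>" transform; e.g. the
 * sha256 entry yields both hmac-sha256-caam and sha256-caam (see
 * caam_hash_alloc() below).
 */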

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
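	/*
	 * The bare 32 and 64 above are deliberate: SHA-224 and SHA-384 are
	 * truncated variants, so MDHA keeps the full SHA-256 (32-byte) and
	 * SHA-512 (64-byte) running state for them.
	 */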
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];
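	/*
	 * Worked example (assuming the usual OP_ALG_ALGSEL_* encoding, in
	 * which the low selector nibble counts MD5..SHA512 in order):
	 * SHA-256 selects index 3, so ctx_len = HASH_MSG_LEN + 32 = 40.
	 */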

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

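	/*
	 * hash_list sits in BSS, so ->next stays NULL unless module init
	 * got as far as INIT_LIST_HEAD(); bail out if nothing registered.
	 */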
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
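
/*
 * Design note: clearing ->setkey for the unkeyed variant makes a setkey()
 * call on a plain hash fail in the crypto core (the default ahash setkey
 * stub returns -ENOSYS) instead of silently accepting a key the transform
 * would ignore.
 */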

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of the MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
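	/*
	 * LP256 denotes the low-power MDHA variant (e.g. on i.MX6), which
	 * supports digests only up to SHA-256; md_limit then filters out
	 * the SHA-384/512 templates in the loop below.
	 */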

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}
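
/*
 * Hedged consumer sketch (not part of this driver; my_done_cb, my_ctx,
 * sg, result and nbytes are placeholders): once registered, the offloaded
 * hashes are reached through the regular ahash API:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	err = crypto_ahash_digest(req);	   // -EINPROGRESS while the job
 *					   // ring runs; my_done_cb fires
 *					   // on completion
 *
 * With CAAM_CRA_PRIORITY (3000) this driver outranks the generic software
 * implementations during algorithm lookup.
 */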

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");