1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Intel Keem Bay OCS HCU Crypto Driver.
4 *
5 * Copyright (C) 2018-2020 Intel Corporation
6 */
7
8 #include <crypto/engine.h>
9 #include <crypto/hmac.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/scatterwalk.h>
12 #include <crypto/sha2.h>
13 #include <crypto/sm3.h>
14 #include <linux/completion.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/mod_devicetable.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/string.h>
23
24 #include "ocs-hcu.h"
25
26 #define DRV_NAME "keembay-ocs-hcu"
27
28 /* Flag marking a final request. */
29 #define REQ_FINAL BIT(0)
30 /* Flag marking an HMAC request. */
31 #define REQ_FLAGS_HMAC BIT(1)
32 /* Flag set when HW HMAC is being used. */
33 #define REQ_FLAGS_HMAC_HW BIT(2)
34 /* Flag set when SW HMAC is being used. */
35 #define REQ_FLAGS_HMAC_SW BIT(3)
36
37 /**
38 * struct ocs_hcu_ctx - OCS HCU Transform context.
39 * @hcu_dev: The OCS HCU device used by the transformation.
40 * @key: The key (used only for HMAC transformations).
41 * @key_len: The length of the key.
42 * @is_sm3_tfm: Whether or not this is an SM3 transformation.
43 * @is_hmac_tfm: Whether or not this is an HMAC transformation.
44 */
45 struct ocs_hcu_ctx {
46 struct ocs_hcu_dev *hcu_dev;
47 u8 key[SHA512_BLOCK_SIZE];
48 size_t key_len;
49 bool is_sm3_tfm;
50 bool is_hmac_tfm;
51 };
52
53 /**
54 * struct ocs_hcu_rctx - Context for the request.
55 * @hcu_dev: OCS HCU device to be used to service the request.
56 * @flags: Flags tracking request status.
57 * @algo: Algorithm to use for the request.
58 * @blk_sz: Block size of the transformation / request.
59 * @dig_sz: Digest size of the transformation / request.
60 * @dma_list: OCS DMA linked list.
61 * @hash_ctx: OCS HCU hashing context.
62 * @buffer: Buffer storing a partial block of data and SW HMAC
63 * artifacts (ipad, opad, etc.).
64 * @buf_cnt: Number of bytes currently stored in the buffer.
65 * @buf_dma_addr: The DMA address of @buffer (when mapped).
66 * @buf_dma_count: The number of bytes in @buffer currently DMA-mapped.
67 * @sg: Head of the scatterlist entries containing data.
68 * @sg_data_total: Total data in the SG list at any time.
69 * @sg_data_offset: Offset into the data of the current individual SG node.
70 * @sg_dma_nents: Number of sg entries mapped in dma_list.
71 * @nents: Number of entries in the scatterlist.
72 */
73 struct ocs_hcu_rctx {
74 struct ocs_hcu_dev *hcu_dev;
75 u32 flags;
76 enum ocs_hcu_algo algo;
77 size_t blk_sz;
78 size_t dig_sz;
79 struct ocs_hcu_dma_list *dma_list;
80 struct ocs_hcu_hash_ctx hash_ctx;
81 /*
82 * Buffer is double the block size because we need space for SW HMAC
83 * artifacts, i.e:
84 * - ipad (1 block) + a possible partial block of data.
85 * - opad (1 block) + digest of H(k ^ ipad || m)
86 */
87 u8 buffer[2 * SHA512_BLOCK_SIZE];
88 size_t buf_cnt;
89 dma_addr_t buf_dma_addr;
90 size_t buf_dma_count;
91 struct scatterlist *sg;
92 unsigned int sg_data_total;
93 unsigned int sg_data_offset;
94 unsigned int sg_dma_nents;
95 unsigned int nents;
96 };
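
/*
 * Illustrative SW HMAC buffer layout (not enforced by the structure itself),
 * assuming SHA-256, i.e. blk_sz = 64 and dig_sz = 32:
 *
 *   ipad step:  buffer[0..63]  = key ^ ipad, followed by buffered message data
 *   opad step:  buffer[0..63]  = key ^ opad
 *               buffer[64..95] = H(key ^ ipad || m)  (intermediate digest)
 *
 * Two SHA-512 blocks (the largest supported block size) are therefore always
 * enough to hold either combination.
 */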
97
98 /**
99 * struct ocs_hcu_drv - Driver data
100 * @dev_list: The list of HCU devices.
101 * @lock: The lock protecting dev_list.
102 */
103 struct ocs_hcu_drv {
104 struct list_head dev_list;
105 spinlock_t lock; /* Protects dev_list. */
106 };
107
108 static struct ocs_hcu_drv ocs_hcu = {
109 .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
110 .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
111 };
112
113 /*
114 * Return the total amount of data in the request; that is: the data in the
115 * request buffer + the data in the sg list.
116 */
117 static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
118 {
119 return rctx->sg_data_total + rctx->buf_cnt;
120 }
121
122 /* Move remaining content of scatter-gather list to context buffer. */
123 static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
124 {
125 size_t count;
126
127 if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
128 WARN(1, "%s: sg data does not fit in buffer\n", __func__);
129 return -EINVAL;
130 }
131
132 while (rctx->sg_data_total) {
133 if (!rctx->sg) {
134 WARN(1, "%s: unexpected NULL sg\n", __func__);
135 return -EINVAL;
136 }
137 /*
138 * If current sg has been fully processed, skip to the next
139 * one.
140 */
141 if (rctx->sg_data_offset == rctx->sg->length) {
142 rctx->sg = sg_next(rctx->sg);
143 rctx->sg_data_offset = 0;
144 continue;
145 }
146 /*
147 * Determine how much data to copy from this node: the minimum of
148 * the length left in the sg node and the total data remaining in
149 * the request.
150 */
151 count = min(rctx->sg->length - rctx->sg_data_offset,
152 rctx->sg_data_total);
153 /* Copy from scatter-list entry to context buffer. */
154 scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
155 rctx->sg, rctx->sg_data_offset,
156 count, 0);
157
158 rctx->sg_data_offset += count;
159 rctx->sg_data_total -= count;
160 rctx->buf_cnt += count;
161 }
162
163 return 0;
164 }
165
166 static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
167 {
168 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
169 struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
170
171 /* If the HCU device for the request was previously set, return it. */
172 if (tctx->hcu_dev)
173 return tctx->hcu_dev;
174
175 /*
176 * Otherwise, get the first HCU device available (there should be one
177 * and only one device).
178 */
179 spin_lock_bh(&ocs_hcu.lock);
180 tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
181 struct ocs_hcu_dev,
182 list);
183 spin_unlock_bh(&ocs_hcu.lock);
184
185 return tctx->hcu_dev;
186 }
187
188 /* Free OCS DMA linked list and DMA-able context buffer. */
189 static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
190 struct ocs_hcu_rctx *rctx)
191 {
192 struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
193 struct device *dev = hcu_dev->dev;
194
195 /* Unmap rctx->buffer (if mapped). */
196 if (rctx->buf_dma_count) {
197 dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
198 DMA_TO_DEVICE);
199 rctx->buf_dma_count = 0;
200 }
201
202 /* Unmap req->src (if mapped). */
203 if (rctx->sg_dma_nents) {
204 dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
205 rctx->sg_dma_nents = 0;
206 }
207
208 /* Free dma_list (if allocated). */
209 if (rctx->dma_list) {
210 ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
211 rctx->dma_list = NULL;
212 }
213 }
214
215 /*
216 * Prepare for DMA operation:
217 * - DMA-map request context buffer (if needed)
218 * - DMA-map SG list (only the entries to be processed, see note below)
219 * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
220 * process + context buffer (if not empty)).
221 * - Add DMA-mapped request context buffer to OCS HCU DMA list.
222 * - Add SG entries to DMA list.
223 *
224 * Note: if this is a final request, we process all the data in the SG list,
225 * otherwise we can only process up to the maximum amount of block-aligned data
226 * (the remainder will be put into the context buffer and processed in the next
227 * request).
228 */
229 static int kmb_ocs_dma_prepare(struct ahash_request *req)
230 {
231 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
232 struct device *dev = rctx->hcu_dev->dev;
233 unsigned int remainder = 0;
234 unsigned int total;
235 size_t nents;
236 size_t count;
237 int rc;
238 int i;
239
240 /* This function should be called only when there is data to process. */
241 total = kmb_get_total_data(rctx);
242 if (!total)
243 return -EINVAL;
244
245 /*
246 * If this is not a final DMA (terminated DMA), the data passed to the
247 * HCU must be aligned to the block size; compute the remainder data to
248 * be processed in the next request.
249 */
250 if (!(rctx->flags & REQ_FINAL))
251 remainder = total % rctx->blk_sz;
252
253 /* Determine the number of scatter gather list entries to process. */
254 nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
255
256 /* If there are entries to process, map them. */
257 if (nents) {
258 rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
259 DMA_TO_DEVICE);
260 if (!rctx->sg_dma_nents) {
261 dev_err(dev, "Failed to MAP SG\n");
262 rc = -ENOMEM;
263 goto cleanup;
264 }
265
266 /* Save the value of nents to pass to dma_unmap_sg. */
267 rctx->nents = nents;
268
269 /*
270 * The value returned by dma_map_sg() can be < nents; so update
271 * nents accordingly.
272 */
273 nents = rctx->sg_dma_nents;
274 }
275
276 /*
277 * If context buffer is not empty, map it and add extra DMA entry for
278 * it.
279 */
280 if (rctx->buf_cnt) {
281 rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
282 rctx->buf_cnt,
283 DMA_TO_DEVICE);
284 if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
285 dev_err(dev, "Failed to map request context buffer\n");
286 rc = -ENOMEM;
287 goto cleanup;
288 }
289 rctx->buf_dma_count = rctx->buf_cnt;
290 /* Increase number of dma entries. */
291 nents++;
292 }
293
294 /* Allocate OCS HCU DMA list. */
295 rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
296 if (!rctx->dma_list) {
297 rc = -ENOMEM;
298 goto cleanup;
299 }
300
301 /* Add request context buffer (if previously DMA-mapped) */
302 if (rctx->buf_dma_count) {
303 rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
304 rctx->buf_dma_addr,
305 rctx->buf_dma_count);
306 if (rc)
307 goto cleanup;
308 }
309
310 /* Add the SG nodes to be processed to the DMA linked list. */
311 for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
312 /*
313 * The number of bytes to add to the list entry is the minimum
314 * between:
315 * - The DMA length of the SG entry.
316 * - The data left to be processed.
317 */
318 count = min(rctx->sg_data_total - remainder,
319 sg_dma_len(rctx->sg) - rctx->sg_data_offset);
320 /*
321 * Do not create a zero length DMA descriptor. Check in case of
322 * zero length SG node.
323 */
324 if (count == 0)
325 continue;
326 /* Add sg to HCU DMA list. */
327 rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
328 rctx->dma_list,
329 rctx->sg->dma_address,
330 count);
331 if (rc)
332 goto cleanup;
333
334 /* Update amount of data remaining in SG list. */
335 rctx->sg_data_total -= count;
336
337 /*
338 * If remaining data is equal to remainder (note: 'less than'
339 * case should never happen in practice), we are done: update
340 * offset and exit the loop.
341 */
342 if (rctx->sg_data_total <= remainder) {
343 WARN_ON(rctx->sg_data_total < remainder);
344 rctx->sg_data_offset += count;
345 break;
346 }
347
348 /*
349 * If we get here, it is because we need to process the next sg
350 * in the list; set the offset within it to 0.
351 */
352 rctx->sg_data_offset = 0;
353 }
354
355 return 0;
356 cleanup:
357 dev_err(dev, "Failed to prepare DMA.\n");
358 kmb_ocs_hcu_dma_cleanup(req, rctx);
359
360 return rc;
361 }
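
/*
 * Worked example (illustrative): an update() of 100 bytes of sg data with a
 * 64-byte block size and 20 bytes already in the context buffer gives:
 *   total    = 120, remainder = 120 % 64 = 56
 *   DMA list = [buffer: 20 bytes] -> [sg data: 44 bytes]   (one full block)
 * The remaining 56 bytes of sg data are copied into the context buffer by
 * flush_sg_to_ocs_buffer() after the hashing step completes.
 */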
362
363 static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
364 {
365 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
366
367 /* Clear buffer of any data. */
368 memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
369 }
370
371 static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
372 {
373 struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
374
375 if (!hcu_dev)
376 return -ENOENT;
377
378 return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
379 }
380
381 static int prepare_ipad(struct ahash_request *req)
382 {
383 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
384 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
385 struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
386 int i;
387
388 WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
389 WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
390 "%s: HMAC_SW flag is not set\n", __func__);
391 /*
392 * Key length must be equal to block size. If key is shorter,
393 * we pad it with zero (note: key cannot be longer, since
394 * longer keys are hashed by kmb_ocs_hcu_setkey()).
395 */
396 if (ctx->key_len > rctx->blk_sz) {
397 WARN(1, "%s: Invalid key length in tfm context\n", __func__);
398 return -EINVAL;
399 }
400 memzero_explicit(&ctx->key[ctx->key_len],
401 rctx->blk_sz - ctx->key_len);
402 ctx->key_len = rctx->blk_sz;
403 /*
404 * Prepare IPAD for HMAC. Only done for first block.
405 * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
406 * k ^ ipad will be first hashed block.
407 * k ^ opad will be calculated in the final request.
408 * Only needed if not using HW HMAC.
409 */
410 for (i = 0; i < rctx->blk_sz; i++)
411 rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
412 rctx->buf_cnt = rctx->blk_sz;
413
414 return 0;
415 }
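
/*
 * After prepare_ipad() the context buffer holds exactly one block, key ^ ipad,
 * so the first data hashed by the HCU is the HMAC inner-pad block followed by
 * the message, as required by H(key ^ ipad || m).
 */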
416
417 static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
418 {
419 struct ahash_request *req = container_of(areq, struct ahash_request,
420 base);
421 struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
422 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
423 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
424 struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
425 int rc;
426 int i;
427
428 if (!hcu_dev) {
429 rc = -ENOENT;
430 goto error;
431 }
432
433 /*
434 * If hardware HMAC flag is set, perform HMAC in hardware.
435 *
436 * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
437 */
438 if (rctx->flags & REQ_FLAGS_HMAC_HW) {
439 /* Map input data into the HCU DMA linked list. */
440 rc = kmb_ocs_dma_prepare(req);
441 if (rc)
442 goto error;
443
444 rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
445 rctx->dma_list, req->result, rctx->dig_sz);
446
447 /* Unmap data and free DMA list regardless of return code. */
448 kmb_ocs_hcu_dma_cleanup(req, rctx);
449
450 /* Process previous return code. */
451 if (rc)
452 goto error;
453
454 goto done;
455 }
456
457 /* Handle update request case. */
458 if (!(rctx->flags & REQ_FINAL)) {
459 /* Update should always have input data. */
460 if (!kmb_get_total_data(rctx))
461 return -EINVAL;
462
463 /* Map input data into the HCU DMA linked list. */
464 rc = kmb_ocs_dma_prepare(req);
465 if (rc)
466 goto error;
467
468 /* Do hashing step. */
469 rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
470 rctx->dma_list);
471
472 /* Unmap data and free DMA list regardless of return code. */
473 kmb_ocs_hcu_dma_cleanup(req, rctx);
474
475 /* Process previous return code. */
476 if (rc)
477 goto error;
478
479 /*
480 * Reset request buffer count (data in the buffer was just
481 * processed).
482 */
483 rctx->buf_cnt = 0;
484 /*
485 * Move remaining sg data into the request buffer, so that it
486 * will be processed during the next request.
487 *
488 * NOTE: we have remaining data if kmb_get_total_data() was not
489 * a multiple of block size.
490 */
491 rc = flush_sg_to_ocs_buffer(rctx);
492 if (rc)
493 goto error;
494
495 goto done;
496 }
497
498 /* If we get here, this is a final request. */
499
500 /* If there is data to process, use finup. */
501 if (kmb_get_total_data(rctx)) {
502 /* Map input data into the HCU DMA linked list. */
503 rc = kmb_ocs_dma_prepare(req);
504 if (rc)
505 goto error;
506
507 /* Do hashing step. */
508 rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
509 rctx->dma_list,
510 req->result, rctx->dig_sz);
511 /* Free DMA list regardless of return code. */
512 kmb_ocs_hcu_dma_cleanup(req, rctx);
513
514 /* Process previous return code. */
515 if (rc)
516 goto error;
517
518 } else { /* Otherwise (if we have no data), use final. */
519 rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
520 rctx->dig_sz);
521 if (rc)
522 goto error;
523 }
524
525 /*
526 * If we are finalizing a SW HMAC request, we just computed the result
527 * of: H(k ^ ipad || m).
528 *
529 * We now need to complete the HMAC calculation with the OPAD step,
530 * that is, we need to compute H(k ^ opad || digest), where digest is
531 * the digest we just obtained, i.e., H(k ^ ipad || m).
532 */
533 if (rctx->flags & REQ_FLAGS_HMAC_SW) {
534 /*
535 * Compute k ^ opad and store it in the request buffer (which
536 * is not used anymore at this point).
537 * Note: key has been padded / hashed already (so keylen ==
538 * blksz).
539 */
540 WARN_ON(tctx->key_len != rctx->blk_sz);
541 for (i = 0; i < rctx->blk_sz; i++)
542 rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
543 /* Now append the digest to the rest of the buffer. */
544 for (i = 0; (i < rctx->dig_sz); i++)
545 rctx->buffer[rctx->blk_sz + i] = req->result[i];
546
547 /* Now hash the buffer to obtain the final HMAC. */
548 rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
549 rctx->blk_sz + rctx->dig_sz, req->result,
550 rctx->dig_sz);
551 if (rc)
552 goto error;
553 }
554
555 /* Perform secure clean-up. */
556 kmb_ocs_hcu_secure_cleanup(req);
557 done:
558 crypto_finalize_hash_request(hcu_dev->engine, req, 0);
559
560 return 0;
561
562 error:
563 kmb_ocs_hcu_secure_cleanup(req);
564 return rc;
565 }
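
/*
 * Summary of the request paths handled above:
 *   - HW HMAC:         one-shot ocs_hcu_hmac() over the whole DMA list.
 *   - update():        block-aligned data is hashed, the tail is buffered.
 *   - final()/finup(): remaining data is hashed; for SW HMAC the result is
 *                      then re-hashed as H(key ^ opad || intermediate digest).
 */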
566
567 static int kmb_ocs_hcu_init(struct ahash_request *req)
568 {
569 struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
570 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
571 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
572 struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
573
574 if (!hcu_dev)
575 return -ENOENT;
576
577 /* Initialize entire request context to zero. */
578 memset(rctx, 0, sizeof(*rctx));
579
580 rctx->hcu_dev = hcu_dev;
581 rctx->dig_sz = crypto_ahash_digestsize(tfm);
582
583 switch (rctx->dig_sz) {
584 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
585 case SHA224_DIGEST_SIZE:
586 rctx->blk_sz = SHA224_BLOCK_SIZE;
587 rctx->algo = OCS_HCU_ALGO_SHA224;
588 break;
589 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
590 case SHA256_DIGEST_SIZE:
591 rctx->blk_sz = SHA256_BLOCK_SIZE;
592 /*
593 * SHA256 and SM3 have the same digest size: use info from tfm
594 * context to find out which one we should use.
595 */
596 rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
597 OCS_HCU_ALGO_SHA256;
598 break;
599 case SHA384_DIGEST_SIZE:
600 rctx->blk_sz = SHA384_BLOCK_SIZE;
601 rctx->algo = OCS_HCU_ALGO_SHA384;
602 break;
603 case SHA512_DIGEST_SIZE:
604 rctx->blk_sz = SHA512_BLOCK_SIZE;
605 rctx->algo = OCS_HCU_ALGO_SHA512;
606 break;
607 default:
608 return -EINVAL;
609 }
610
611 /* Initialize intermediate data. */
612 ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
613
614 /* If this is an HMAC request, set the HMAC flag. */
615 if (ctx->is_hmac_tfm)
616 rctx->flags |= REQ_FLAGS_HMAC;
617
618 return 0;
619 }
620
621 static int kmb_ocs_hcu_update(struct ahash_request *req)
622 {
623 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
624 int rc;
625
626 if (!req->nbytes)
627 return 0;
628
629 rctx->sg_data_total = req->nbytes;
630 rctx->sg_data_offset = 0;
631 rctx->sg = req->src;
632
633 /*
634 * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
635 * HMAC does not support context switching (thus it can only be used
636 * with finup() or digest()).
637 */
638 if (rctx->flags & REQ_FLAGS_HMAC &&
639 !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
640 rctx->flags |= REQ_FLAGS_HMAC_SW;
641 rc = prepare_ipad(req);
642 if (rc)
643 return rc;
644 }
645
646 /*
647 * If remaining sg_data fits into ctx buffer, just copy it there; we'll
648 * process it at the next update() or final().
649 */
650 if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
651 return flush_sg_to_ocs_buffer(rctx);
652
653 return kmb_ocs_hcu_handle_queue(req);
654 }
655
656 /* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
657 static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
658 {
659 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
660 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
661 struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
662 int rc;
663
664 rctx->flags |= REQ_FINAL;
665
666 /*
667 * If this is a HMAC request and, so far, we didn't have to switch to
668 * SW HMAC, check if we can use HW HMAC.
669 */
670 if (rctx->flags & REQ_FLAGS_HMAC &&
671 !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
672 /*
673 * If we are here, it means we never processed any data so far,
674 * so we can use HW HMAC, but only if there is some data to
675 * process (since OCS HW HMAC does not support zero-length
676 * messages) and the key length is supported by the hardware
677 * (OCS HCU HW only supports length <= 64); if HW HMAC cannot
678 * be used, fall back to SW-assisted HMAC.
679 */
680 if (kmb_get_total_data(rctx) &&
681 ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
682 rctx->flags |= REQ_FLAGS_HMAC_HW;
683 } else {
684 rctx->flags |= REQ_FLAGS_HMAC_SW;
685 rc = prepare_ipad(req);
686 if (rc)
687 return rc;
688 }
689 }
690
691 return kmb_ocs_hcu_handle_queue(req);
692 }
693
694 static int kmb_ocs_hcu_final(struct ahash_request *req)
695 {
696 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
697
698 rctx->sg_data_total = 0;
699 rctx->sg_data_offset = 0;
700 rctx->sg = NULL;
701
702 return kmb_ocs_hcu_fin_common(req);
703 }
704
705 static int kmb_ocs_hcu_finup(struct ahash_request *req)
706 {
707 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
708
709 rctx->sg_data_total = req->nbytes;
710 rctx->sg_data_offset = 0;
711 rctx->sg = req->src;
712
713 return kmb_ocs_hcu_fin_common(req);
714 }
715
716 static int kmb_ocs_hcu_digest(struct ahash_request *req)
717 {
718 int rc = 0;
719 struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
720
721 if (!hcu_dev)
722 return -ENOENT;
723
724 rc = kmb_ocs_hcu_init(req);
725 if (rc)
726 return rc;
727
728 rc = kmb_ocs_hcu_finup(req);
729
730 return rc;
731 }
732
733 static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
734 {
735 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
736
737 /* Intermediate data is always stored and applied per request. */
738 memcpy(out, rctx, sizeof(*rctx));
739
740 return 0;
741 }
742
743 static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
744 {
745 struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
746
747 /* Intermediate data is always stored and applied per request. */
748 memcpy(rctx, in, sizeof(*rctx));
749
750 return 0;
751 }
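
/*
 * Note: export()/import() copy the entire request context (hence statesize is
 * sizeof(struct ocs_hcu_rctx) in the algorithm definitions below); the
 * DMA-related fields are only meaningful while a request is being processed
 * by the engine, so carrying them across export/import is harmless.
 */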
752
753 static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
754 unsigned int keylen)
755 {
756 unsigned int digestsize = crypto_ahash_digestsize(tfm);
757 struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
758 size_t blk_sz = crypto_ahash_blocksize(tfm);
759 struct crypto_ahash *ahash_tfm;
760 struct ahash_request *req;
761 struct crypto_wait wait;
762 struct scatterlist sg;
763 const char *alg_name;
764 int rc;
765
766 /*
767 * Key length must be equal to block size:
768 * - If key is shorter, we are done for now (the key will be padded
769 * later on); this is to maximize the use of HW HMAC (which works
770 * only for keys <= 64 bytes).
771 * - If key is longer, we hash it.
772 */
773 if (keylen <= blk_sz) {
774 memcpy(ctx->key, key, keylen);
775 ctx->key_len = keylen;
776 return 0;
777 }
778
779 switch (digestsize) {
780 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
781 case SHA224_DIGEST_SIZE:
782 alg_name = "sha224-keembay-ocs";
783 break;
784 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
785 case SHA256_DIGEST_SIZE:
786 alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
787 "sha256-keembay-ocs";
788 break;
789 case SHA384_DIGEST_SIZE:
790 alg_name = "sha384-keembay-ocs";
791 break;
792 case SHA512_DIGEST_SIZE:
793 alg_name = "sha512-keembay-ocs";
794 break;
795 default:
796 return -EINVAL;
797 }
798
799 ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
800 if (IS_ERR(ahash_tfm))
801 return PTR_ERR(ahash_tfm);
802
803 req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
804 if (!req) {
805 rc = -ENOMEM;
806 goto err_free_ahash;
807 }
808
809 crypto_init_wait(&wait);
810 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
811 crypto_req_done, &wait);
812 crypto_ahash_clear_flags(ahash_tfm, ~0);
813
814 sg_init_one(&sg, key, keylen);
815 ahash_request_set_crypt(req, &sg, ctx->key, keylen);
816
817 rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
818 if (rc == 0)
819 ctx->key_len = digestsize;
820
821 ahash_request_free(req);
822 err_free_ahash:
823 crypto_free_ahash(ahash_tfm);
824
825 return rc;
826 }
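
/*
 * Illustrative key-handling example: for hmac(sha256) (blk_sz = 64), a
 * 100-byte key is reduced here to its 32-byte SHA-256 digest; prepare_ipad()
 * later zero-pads it back to 64 bytes when the SW HMAC path is taken, while
 * the HW HMAC path can use the 32-byte key directly (<= OCS_HCU_HW_KEY_LEN).
 */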
827
828 /* Set request size and initialize tfm context. */
829 static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
830 {
831 crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
832 sizeof(struct ocs_hcu_rctx));
833 }
834
835 static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
836 {
837 struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
838
839 __cra_init(tfm, ctx);
840
841 return 0;
842 }
843
844 static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
845 {
846 struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
847
848 __cra_init(tfm, ctx);
849
850 ctx->is_sm3_tfm = true;
851
852 return 0;
853 }
854
855 static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
856 {
857 struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
858
859 __cra_init(tfm, ctx);
860
861 ctx->is_sm3_tfm = true;
862 ctx->is_hmac_tfm = true;
863
864 return 0;
865 }
866
867 static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
868 {
869 struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
870
871 __cra_init(tfm, ctx);
872
873 ctx->is_hmac_tfm = true;
874
875 return 0;
876 }
877
878 /* Function called when 'tfm' is de-initialized. */
879 static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
880 {
881 struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
882
883 /* Clear the key. */
884 memzero_explicit(ctx->key, sizeof(ctx->key));
885 }
886
887 static struct ahash_engine_alg ocs_hcu_algs[] = {
888 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
889 {
890 .base.init = kmb_ocs_hcu_init,
891 .base.update = kmb_ocs_hcu_update,
892 .base.final = kmb_ocs_hcu_final,
893 .base.finup = kmb_ocs_hcu_finup,
894 .base.digest = kmb_ocs_hcu_digest,
895 .base.export = kmb_ocs_hcu_export,
896 .base.import = kmb_ocs_hcu_import,
897 .base.halg = {
898 .digestsize = SHA224_DIGEST_SIZE,
899 .statesize = sizeof(struct ocs_hcu_rctx),
900 .base = {
901 .cra_name = "sha224",
902 .cra_driver_name = "sha224-keembay-ocs",
903 .cra_priority = 255,
904 .cra_flags = CRYPTO_ALG_ASYNC,
905 .cra_blocksize = SHA224_BLOCK_SIZE,
906 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
907 .cra_alignmask = 0,
908 .cra_module = THIS_MODULE,
909 .cra_init = kmb_ocs_hcu_sha_cra_init,
910 }
911 },
912 .op.do_one_request = kmb_ocs_hcu_do_one_request,
913 },
914 {
915 .base.init = kmb_ocs_hcu_init,
916 .base.update = kmb_ocs_hcu_update,
917 .base.final = kmb_ocs_hcu_final,
918 .base.finup = kmb_ocs_hcu_finup,
919 .base.digest = kmb_ocs_hcu_digest,
920 .base.export = kmb_ocs_hcu_export,
921 .base.import = kmb_ocs_hcu_import,
922 .base.setkey = kmb_ocs_hcu_setkey,
923 .base.halg = {
924 .digestsize = SHA224_DIGEST_SIZE,
925 .statesize = sizeof(struct ocs_hcu_rctx),
926 .base = {
927 .cra_name = "hmac(sha224)",
928 .cra_driver_name = "hmac-sha224-keembay-ocs",
929 .cra_priority = 255,
930 .cra_flags = CRYPTO_ALG_ASYNC,
931 .cra_blocksize = SHA224_BLOCK_SIZE,
932 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
933 .cra_alignmask = 0,
934 .cra_module = THIS_MODULE,
935 .cra_init = kmb_ocs_hcu_hmac_cra_init,
936 .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
937 }
938 },
939 .op.do_one_request = kmb_ocs_hcu_do_one_request,
940 },
941 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
942 {
943 .base.init = kmb_ocs_hcu_init,
944 .base.update = kmb_ocs_hcu_update,
945 .base.final = kmb_ocs_hcu_final,
946 .base.finup = kmb_ocs_hcu_finup,
947 .base.digest = kmb_ocs_hcu_digest,
948 .base.export = kmb_ocs_hcu_export,
949 .base.import = kmb_ocs_hcu_import,
950 .base.halg = {
951 .digestsize = SHA256_DIGEST_SIZE,
952 .statesize = sizeof(struct ocs_hcu_rctx),
953 .base = {
954 .cra_name = "sha256",
955 .cra_driver_name = "sha256-keembay-ocs",
956 .cra_priority = 255,
957 .cra_flags = CRYPTO_ALG_ASYNC,
958 .cra_blocksize = SHA256_BLOCK_SIZE,
959 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
960 .cra_alignmask = 0,
961 .cra_module = THIS_MODULE,
962 .cra_init = kmb_ocs_hcu_sha_cra_init,
963 }
964 },
965 .op.do_one_request = kmb_ocs_hcu_do_one_request,
966 },
967 {
968 .base.init = kmb_ocs_hcu_init,
969 .base.update = kmb_ocs_hcu_update,
970 .base.final = kmb_ocs_hcu_final,
971 .base.finup = kmb_ocs_hcu_finup,
972 .base.digest = kmb_ocs_hcu_digest,
973 .base.export = kmb_ocs_hcu_export,
974 .base.import = kmb_ocs_hcu_import,
975 .base.setkey = kmb_ocs_hcu_setkey,
976 .base.halg = {
977 .digestsize = SHA256_DIGEST_SIZE,
978 .statesize = sizeof(struct ocs_hcu_rctx),
979 .base = {
980 .cra_name = "hmac(sha256)",
981 .cra_driver_name = "hmac-sha256-keembay-ocs",
982 .cra_priority = 255,
983 .cra_flags = CRYPTO_ALG_ASYNC,
984 .cra_blocksize = SHA256_BLOCK_SIZE,
985 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
986 .cra_alignmask = 0,
987 .cra_module = THIS_MODULE,
988 .cra_init = kmb_ocs_hcu_hmac_cra_init,
989 .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
990 }
991 },
992 .op.do_one_request = kmb_ocs_hcu_do_one_request,
993 },
994 {
995 .base.init = kmb_ocs_hcu_init,
996 .base.update = kmb_ocs_hcu_update,
997 .base.final = kmb_ocs_hcu_final,
998 .base.finup = kmb_ocs_hcu_finup,
999 .base.digest = kmb_ocs_hcu_digest,
1000 .base.export = kmb_ocs_hcu_export,
1001 .base.import = kmb_ocs_hcu_import,
1002 .base.halg = {
1003 .digestsize = SM3_DIGEST_SIZE,
1004 .statesize = sizeof(struct ocs_hcu_rctx),
1005 .base = {
1006 .cra_name = "sm3",
1007 .cra_driver_name = "sm3-keembay-ocs",
1008 .cra_priority = 255,
1009 .cra_flags = CRYPTO_ALG_ASYNC,
1010 .cra_blocksize = SM3_BLOCK_SIZE,
1011 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1012 .cra_alignmask = 0,
1013 .cra_module = THIS_MODULE,
1014 .cra_init = kmb_ocs_hcu_sm3_cra_init,
1015 }
1016 },
1017 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1018 },
1019 {
1020 .base.init = kmb_ocs_hcu_init,
1021 .base.update = kmb_ocs_hcu_update,
1022 .base.final = kmb_ocs_hcu_final,
1023 .base.finup = kmb_ocs_hcu_finup,
1024 .base.digest = kmb_ocs_hcu_digest,
1025 .base.export = kmb_ocs_hcu_export,
1026 .base.import = kmb_ocs_hcu_import,
1027 .base.setkey = kmb_ocs_hcu_setkey,
1028 .base.halg = {
1029 .digestsize = SM3_DIGEST_SIZE,
1030 .statesize = sizeof(struct ocs_hcu_rctx),
1031 .base = {
1032 .cra_name = "hmac(sm3)",
1033 .cra_driver_name = "hmac-sm3-keembay-ocs",
1034 .cra_priority = 255,
1035 .cra_flags = CRYPTO_ALG_ASYNC,
1036 .cra_blocksize = SM3_BLOCK_SIZE,
1037 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1038 .cra_alignmask = 0,
1039 .cra_module = THIS_MODULE,
1040 .cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
1041 .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
1042 }
1043 },
1044 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1045 },
1046 {
1047 .base.init = kmb_ocs_hcu_init,
1048 .base.update = kmb_ocs_hcu_update,
1049 .base.final = kmb_ocs_hcu_final,
1050 .base.finup = kmb_ocs_hcu_finup,
1051 .base.digest = kmb_ocs_hcu_digest,
1052 .base.export = kmb_ocs_hcu_export,
1053 .base.import = kmb_ocs_hcu_import,
1054 .base.halg = {
1055 .digestsize = SHA384_DIGEST_SIZE,
1056 .statesize = sizeof(struct ocs_hcu_rctx),
1057 .base = {
1058 .cra_name = "sha384",
1059 .cra_driver_name = "sha384-keembay-ocs",
1060 .cra_priority = 255,
1061 .cra_flags = CRYPTO_ALG_ASYNC,
1062 .cra_blocksize = SHA384_BLOCK_SIZE,
1063 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1064 .cra_alignmask = 0,
1065 .cra_module = THIS_MODULE,
1066 .cra_init = kmb_ocs_hcu_sha_cra_init,
1067 }
1068 },
1069 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1070 },
1071 {
1072 .base.init = kmb_ocs_hcu_init,
1073 .base.update = kmb_ocs_hcu_update,
1074 .base.final = kmb_ocs_hcu_final,
1075 .base.finup = kmb_ocs_hcu_finup,
1076 .base.digest = kmb_ocs_hcu_digest,
1077 .base.export = kmb_ocs_hcu_export,
1078 .base.import = kmb_ocs_hcu_import,
1079 .base.setkey = kmb_ocs_hcu_setkey,
1080 .base.halg = {
1081 .digestsize = SHA384_DIGEST_SIZE,
1082 .statesize = sizeof(struct ocs_hcu_rctx),
1083 .base = {
1084 .cra_name = "hmac(sha384)",
1085 .cra_driver_name = "hmac-sha384-keembay-ocs",
1086 .cra_priority = 255,
1087 .cra_flags = CRYPTO_ALG_ASYNC,
1088 .cra_blocksize = SHA384_BLOCK_SIZE,
1089 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1090 .cra_alignmask = 0,
1091 .cra_module = THIS_MODULE,
1092 .cra_init = kmb_ocs_hcu_hmac_cra_init,
1093 .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
1094 }
1095 },
1096 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1097 },
1098 {
1099 .base.init = kmb_ocs_hcu_init,
1100 .base.update = kmb_ocs_hcu_update,
1101 .base.final = kmb_ocs_hcu_final,
1102 .base.finup = kmb_ocs_hcu_finup,
1103 .base.digest = kmb_ocs_hcu_digest,
1104 .base.export = kmb_ocs_hcu_export,
1105 .base.import = kmb_ocs_hcu_import,
1106 .base.halg = {
1107 .digestsize = SHA512_DIGEST_SIZE,
1108 .statesize = sizeof(struct ocs_hcu_rctx),
1109 .base = {
1110 .cra_name = "sha512",
1111 .cra_driver_name = "sha512-keembay-ocs",
1112 .cra_priority = 255,
1113 .cra_flags = CRYPTO_ALG_ASYNC,
1114 .cra_blocksize = SHA512_BLOCK_SIZE,
1115 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1116 .cra_alignmask = 0,
1117 .cra_module = THIS_MODULE,
1118 .cra_init = kmb_ocs_hcu_sha_cra_init,
1119 }
1120 },
1121 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1122 },
1123 {
1124 .base.init = kmb_ocs_hcu_init,
1125 .base.update = kmb_ocs_hcu_update,
1126 .base.final = kmb_ocs_hcu_final,
1127 .base.finup = kmb_ocs_hcu_finup,
1128 .base.digest = kmb_ocs_hcu_digest,
1129 .base.export = kmb_ocs_hcu_export,
1130 .base.import = kmb_ocs_hcu_import,
1131 .base.setkey = kmb_ocs_hcu_setkey,
1132 .base.halg = {
1133 .digestsize = SHA512_DIGEST_SIZE,
1134 .statesize = sizeof(struct ocs_hcu_rctx),
1135 .base = {
1136 .cra_name = "hmac(sha512)",
1137 .cra_driver_name = "hmac-sha512-keembay-ocs",
1138 .cra_priority = 255,
1139 .cra_flags = CRYPTO_ALG_ASYNC,
1140 .cra_blocksize = SHA512_BLOCK_SIZE,
1141 .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
1142 .cra_alignmask = 0,
1143 .cra_module = THIS_MODULE,
1144 .cra_init = kmb_ocs_hcu_hmac_cra_init,
1145 .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
1146 }
1147 },
1148 .op.do_one_request = kmb_ocs_hcu_do_one_request,
1149 },
1150 };
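
/*
 * Illustrative in-kernel usage (assuming the device is probed and the engine
 * is running): crypto_alloc_ahash("hmac(sha256)", 0, 0) selects
 * "hmac-sha256-keembay-ocs" when its priority (255) wins, and subsequent
 * ahash init/update/final/digest calls are dispatched through the crypto
 * engine to kmb_ocs_hcu_do_one_request().
 */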
1151
1152 /* Device tree driver match. */
1153 static const struct of_device_id kmb_ocs_hcu_of_match[] = {
1154 {
1155 .compatible = "intel,keembay-ocs-hcu",
1156 },
1157 {}
1158 };
1159 MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);
1160
1161 static void kmb_ocs_hcu_remove(struct platform_device *pdev)
1162 {
1163 struct ocs_hcu_dev *hcu_dev = platform_get_drvdata(pdev);
1164
1165 crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
1166
1167 crypto_engine_exit(hcu_dev->engine);
1168
1169 spin_lock_bh(&ocs_hcu.lock);
1170 list_del(&hcu_dev->list);
1171 spin_unlock_bh(&ocs_hcu.lock);
1172 }
1173
1174 static int kmb_ocs_hcu_probe(struct platform_device *pdev)
1175 {
1176 struct device *dev = &pdev->dev;
1177 struct ocs_hcu_dev *hcu_dev;
1178 int rc;
1179
1180 hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
1181 if (!hcu_dev)
1182 return -ENOMEM;
1183
1184 hcu_dev->dev = dev;
1185
1186 platform_set_drvdata(pdev, hcu_dev);
1187 rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
1188 if (rc)
1189 return rc;
1190
1191 hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
1192 if (IS_ERR(hcu_dev->io_base))
1193 return PTR_ERR(hcu_dev->io_base);
1194
1195 init_completion(&hcu_dev->irq_done);
1196
1197 /* Get and request IRQ. */
1198 hcu_dev->irq = platform_get_irq(pdev, 0);
1199 if (hcu_dev->irq < 0)
1200 return hcu_dev->irq;
1201
1202 rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
1203 ocs_hcu_irq_handler, NULL, 0,
1204 "keembay-ocs-hcu", hcu_dev);
1205 if (rc < 0) {
1206 dev_err(dev, "Could not request IRQ.\n");
1207 return rc;
1208 }
1209
1210 INIT_LIST_HEAD(&hcu_dev->list);
1211
1212 spin_lock_bh(&ocs_hcu.lock);
1213 list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
1214 spin_unlock_bh(&ocs_hcu.lock);
1215
1216 /* Initialize crypto engine */
1217 hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
1218 if (!hcu_dev->engine) {
1219 rc = -ENOMEM;
1220 goto list_del;
1221 }
1222
1223 rc = crypto_engine_start(hcu_dev->engine);
1224 if (rc) {
1225 dev_err(dev, "Could not start engine.\n");
1226 goto cleanup;
1227 }
1228
1229 /* Security infrastructure guarantees OCS clock is enabled. */
1230
1231 rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
1232 if (rc) {
1233 dev_err(dev, "Could not register algorithms.\n");
1234 goto cleanup;
1235 }
1236
1237 return 0;
1238
1239 cleanup:
1240 crypto_engine_exit(hcu_dev->engine);
1241 list_del:
1242 spin_lock_bh(&ocs_hcu.lock);
1243 list_del(&hcu_dev->list);
1244 spin_unlock_bh(&ocs_hcu.lock);
1245
1246 return rc;
1247 }
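
/*
 * Probe order above: map MMIO, request the IRQ, add the device to the global
 * list, then start a single-queue crypto engine and register the ahash
 * algorithms; remove() unwinds in the reverse order.
 */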
1248
1249 /* The OCS driver is a platform device. */
1250 static struct platform_driver kmb_ocs_hcu_driver = {
1251 .probe = kmb_ocs_hcu_probe,
1252 .remove_new = kmb_ocs_hcu_remove,
1253 .driver = {
1254 .name = DRV_NAME,
1255 .of_match_table = kmb_ocs_hcu_of_match,
1256 },
1257 };
1258
1259 module_platform_driver(kmb_ocs_hcu_driver);
1260
1261 MODULE_LICENSE("GPL");
1262