// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
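
/*
 * Default maximum length of the software queue, used when the driver does
 * not pass its own qlen via crypto_engine_alloc_init_and_set().
 */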
#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
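	/*
	 * Completion callbacks are expected to run with bottom halves
	 * disabled; the assertion below catches drivers that finalize a
	 * request from the wrong context.
	 */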
	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	crypto_request_complete(async_req, ret);
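
	/*
	 * A request that was backlogged while the queue was full has now
	 * made it into the queue proper; tell its submitter that it is
	 * in progress.
	 */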
retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work to process the request
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
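
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * skcipher .encrypt() entry point typically just hands the request to the
 * engine and reports the queueing status back to the caller. The foo_*
 * names below are hypothetical.
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		// Returns -EINPROGRESS, -EBUSY (backlogged) or an error.
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 */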

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
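
/*
 * Usage sketch (illustrative only): once the hardware signals that a
 * request has finished, the driver completes it from its interrupt
 * handler. The foo_* names are hypothetical; completion is expected to
 * run with BH disabled (see the assertion in crypto_finalize_request()),
 * so a threaded handler would wrap the call as shown.
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *		int err = foo_read_status(foo);
 *
 *		local_bh_disable();
 *		crypto_finalize_skcipher_request(foo->engine, foo->req, err);
 *		local_bh_enable();
 *		return IRQ_HANDLED;
 *	}
 */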

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need
	 * to wait for a while until the pending requests are pumped out.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if the hardware has support
	 * for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
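
/*
 * Usage sketch (illustrative only): a driver normally creates and starts
 * its engine at probe time, before registering the algorithms that will
 * feed requests to it. The foo_* names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *foo = platform_get_drvdata(pdev);
 *
 *		foo->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!foo->engine)
 *			return -ENOMEM;
 *
 *		return crypto_engine_start(foo->engine);
 *	}
 */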

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exiting
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
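
/*
 * Usage sketch (illustrative only): the matching teardown runs at remove
 * time, after the driver has unregistered its algorithms so no new
 * requests can arrive. The engine structure itself is devm-allocated
 * above and is released together with the device.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dev *foo = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(foo->engine);
 *	}
 */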

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");