1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2022 Intel Corporation */
3 #include "adf_transport.h"
4 #include "qat_algs_send.h"
5 #include "qat_crypto.h"
6
7 #define ADF_MAX_RETRIES 20
8
qat_alg_send_message_retry(struct qat_alg_req * req)9 static int qat_alg_send_message_retry(struct qat_alg_req *req)
10 {
11 int ret = 0, ctr = 0;
12
13 do {
14 ret = adf_send_message(req->tx_ring, req->fw_req);
15 } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
16
17 if (ret == -EAGAIN)
18 return -ENOSPC;
19
20 return -EINPROGRESS;
21 }
22
qat_alg_send_backlog(struct qat_instance_backlog * backlog)23 void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
24 {
25 struct qat_alg_req *req, *tmp;
26
27 spin_lock_bh(&backlog->lock);
28 list_for_each_entry_safe(req, tmp, &backlog->list, list) {
29 if (adf_send_message(req->tx_ring, req->fw_req)) {
30 /* The HW ring is full. Do nothing.
31 * qat_alg_send_backlog() will be invoked again by
32 * another callback.
33 */
34 break;
35 }
36 list_del(&req->list);
37 req->base->complete(req->base, -EINPROGRESS);
38 }
39 spin_unlock_bh(&backlog->lock);
40 }
41
qat_alg_try_enqueue(struct qat_alg_req * req)42 static bool qat_alg_try_enqueue(struct qat_alg_req *req)
43 {
44 struct qat_instance_backlog *backlog = req->backlog;
45 struct adf_etr_ring_data *tx_ring = req->tx_ring;
46 u32 *fw_req = req->fw_req;
47
48 /* Check if any request is already backlogged */
49 if (!list_empty(&backlog->list))
50 return false;
51
52 /* Check if ring is nearly full */
53 if (adf_ring_nearly_full(tx_ring))
54 return false;
55
56 /* Try to enqueue to HW ring */
57 if (adf_send_message(tx_ring, fw_req))
58 return false;
59
60 return true;
61 }
62
63
/* Submit a request, falling back to the software backlog if the HW ring
 * cannot take it. Returns -EINPROGRESS when the request went to HW, or
 * -EBUSY when it was backlogged (crypto API backlog semantics).
 */
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
	struct qat_instance_backlog *backlog = req->backlog;
	int ret = -EINPROGRESS;

	/* Lockless fast path: try a direct submission first */
	if (qat_alg_try_enqueue(req))
		return ret;

	/* Slow path: re-check under the backlog lock. The second attempt is
	 * required because a concurrent qat_alg_send_backlog() may have
	 * emptied the list (or freed ring space) between the unlocked check
	 * above and acquiring the lock; without it a request could be added
	 * to the backlog after the drainer has already finished, leaving it
	 * stuck until the next completion.
	 */
	spin_lock_bh(&backlog->lock);
	if (!qat_alg_try_enqueue(req)) {
		list_add_tail(&req->list, &backlog->list);
		ret = -EBUSY;
	}
	spin_unlock_bh(&backlog->lock);

	return ret;
}
81
qat_alg_send_message(struct qat_alg_req * req)82 int qat_alg_send_message(struct qat_alg_req *req)
83 {
84 u32 flags = req->base->flags;
85
86 if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
87 return qat_alg_send_message_maybacklog(req);
88 else
89 return qat_alg_send_message_retry(req);
90 }
91