// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_gen2_hw_data.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

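/* All configuration is read from the kernel section of the device config */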
#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

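/*
 * Drop a reference to a crypto instance and release the reference held on
 * its acceleration device, taken in qat_crypto_get_instance_node().
 */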
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

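/*
 * Tear down all crypto instances of a device: drop any outstanding instance
 * references, remove the four transport rings of each instance and free it.
 */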
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

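/*
 * Select the least-referenced crypto instance on a started device close to
 * the given NUMA node, falling back to any started device if none matches.
 * On success a reference is taken on both the device and the instance;
 * release it with qat_crypto_put_instance().
 */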
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}
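
/*
 * Example usage (hypothetical caller, not part of this file); picking the
 * local NUMA node via numa_node_id() is just one possible policy:
 *
 *	struct qat_crypto_instance *inst;
 *
 *	inst = qat_crypto_get_instance_node(numa_node_id());
 *	if (!inst)
 *		return -ENODEV;
 *	... submit requests on inst->sym_tx / inst->pke_tx ...
 *	qat_crypto_put_instance(inst);
 */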

/**
 * qat_crypto_vf_dev_config() - create device configuration required to
 * create crypto instances
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates the device configuration required to create
 * asym or sym crypto instances.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
{
	u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;

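	/*
	 * VF crypto instances assume the default gen2 ring to service
	 * mapping; the mapping is owned by the PF, so bail out if the PF
	 * has configured anything else.
	 */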
	if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
		dev_err(&GET_DEV(accel_dev),
			"Unsupported ring/service mapping present on PF\n");
		return -EFAULT;
	}

	return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
}

static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long sym_bank, asym_bank;
	struct qat_crypto_instance *inst;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
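	/* Number of crypto instances (ADF_NUM_CY) from the kernel config section */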
	ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 0, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &sym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &asym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_sym);
		if (ret)
			goto err;

		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_asym);
		if (ret)
			goto err;
		num_msg_asym = num_msg_asym >> 1;

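		/* Request (tx) rings carry no callback; responses arrive on rx */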
		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, NULL, 0, &inst->sym_tx);
		if (ret)
			goto err;

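		/* Asym (PKE) requests use half the default request message size */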
		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, NULL, 0, &inst->pke_tx);
		if (ret)
			goto err;

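		/* Response (rx) rings invoke the sym/asym completion callbacks */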
		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, qat_alg_callback, 0,
				      &inst->sym_rx);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, qat_alg_asym_callback, 0,
				      &inst->pke_rx);
		if (ret)
			goto err;

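		/* Backlog of requests that could not be put on a full tx ring */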
		INIT_LIST_HEAD(&inst->backlog.list);
		spin_lock_init(&inst->backlog.lock);
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return ret;
}

static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

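/*
 * Dispatch device lifecycle events; only INIT and SHUTDOWN need action,
 * all other events are acknowledged without doing anything.
 */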
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

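/*
 * Register the crypto service with the ADF framework so that
 * qat_crypto_event_handler() receives device lifecycle events.
 */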
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}
