// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

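/* Drop the caller's reference to the instance and to its accel device. */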
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

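/*
 * Release any outstanding references held on each instance, remove its
 * four transport rings, and free the instance itself.
 */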
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

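/*
 * Pick a crypto instance for the caller: prefer the least referenced
 * started device on the requested NUMA node, fall back to any started
 * device, then take the least referenced instance on the chosen device.
 */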
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates device configuration required to create crypto instances.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int i;
	unsigned long val;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;

	/* Temporarily set the number of crypto instances to zero to avoid
	 * registering the crypto algorithms.
	 * This will be removed once the algorithms support the
	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
	 */
	instances = 0;

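	/* For each instance, configure its ring bank, core affinity, ring
	 * sizes, tx/rx ring numbers and the bank's interrupt coalescing timer.
	 */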
	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);

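/*
 * Read the number of crypto instances from the device configuration and,
 * for each one, create its sym/asym tx rings and the matching rx rings
 * with the symmetric and asymmetric response callbacks.
 */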
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	if (adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}

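/* Create the crypto instances for a newly initialized device. */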
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

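/* Tear down all crypto instances on device shutdown. */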
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

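/* Dispatch ADF lifecycle events to the init/shutdown handlers above. */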
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

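/* Register the crypto service with the ADF framework. */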
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

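/* Unregister the crypto service from the ADF framework. */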
int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}