// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void adf_service_add(struct service_hndl *service)
{
        mutex_lock(&service_lock);
        list_add(&service->list, &service_table);
        mutex_unlock(&service_lock);
}

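/*
 * adf_service_register() - Register an acceleration service.
 * Clears the per-device init/start status bitmaps and adds the service to
 * the global service table so it receives device lifecycle events.
 */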
int adf_service_register(struct service_hndl *service)
{
        memset(service->init_status, 0, sizeof(service->init_status));
        memset(service->start_status, 0, sizeof(service->start_status));
        adf_service_add(service);
        return 0;
}

static void adf_service_remove(struct service_hndl *service)
{
        mutex_lock(&service_lock);
        list_del(&service->list);
        mutex_unlock(&service_lock);
}

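/*
 * adf_service_unregister() - Unregister an acceleration service.
 * Removes the service from the global service table; fails if the service
 * is still marked as initialised or started on any device.
 */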
int adf_service_unregister(struct service_hndl *service)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
                if (service->init_status[i] || service->start_status[i]) {
                        pr_err("QAT: Could not remove active service\n");
                        return -EFAULT;
                }
        }
        adf_service_remove(service);
        return 0;
}

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_dev_init(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int ret;

        if (!hw_data) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to init device - hw_data not set\n");
                return -EFAULT;
        }

        if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
            !accel_dev->is_vf) {
                dev_err(&GET_DEV(accel_dev), "Device not configured\n");
                return -EFAULT;
        }

        if (adf_init_etr_data(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
                return -EFAULT;
        }

        if (hw_data->init_device && hw_data->init_device(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
                return -EFAULT;
        }

        if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
                return -EFAULT;
        }

        if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
                return -EFAULT;
        }

        if (adf_ae_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to initialise Acceleration Engine\n");
                return -EFAULT;
        }
        set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

        if (adf_ae_fw_load(accel_dev)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to load acceleration FW\n");
                return -EFAULT;
        }
        set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

        if (hw_data->alloc_irq(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
                return -EFAULT;
        }
        set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

        hw_data->enable_ints(accel_dev);
        hw_data->enable_error_correction(accel_dev);

        ret = hw_data->pfvf_ops.enable_comms(accel_dev);
        if (ret)
                return ret;

        if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
            accel_dev->is_vf) {
                if (qat_crypto_vf_dev_config(accel_dev))
                        return -EFAULT;
        }

        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
         * prior to starting any of the accelerators.
         */
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to initialise service %s\n",
                                service->name);
                        return -EFAULT;
                }
                set_bit(accel_dev->accel_id, service->init_status);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_init);

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_dev_start(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;

        set_bit(ADF_STATUS_STARTING, &accel_dev->status);

        if (adf_ae_start(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
                return -EFAULT;
        }
        set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

        if (hw_data->send_admin_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
                return -EFAULT;
        }

        /* Set ssm watch dog timer */
        if (hw_data->set_ssm_wdtimer)
                hw_data->set_ssm_wdtimer(accel_dev);

        /* Enable Power Management */
        if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
                return -EFAULT;
        }

        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_START)) {
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to start service %s\n",
                                service->name);
                        return -EFAULT;
                }
                set_bit(accel_dev->accel_id, service->start_status);
        }

        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        set_bit(ADF_STATUS_STARTED, &accel_dev->status);

        if (!list_empty(&accel_dev->crypto_list) &&
            (qat_algs_register() || qat_asym_algs_register())) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to register crypto algs\n");
                set_bit(ADF_STATUS_STARTING, &accel_dev->status);
                clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
                return -EFAULT;
        }
        set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

        return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_start);

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;
        bool wait = false;
        int ret;

        if (!adf_dev_started(accel_dev) &&
            !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
                return;

        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

        if (!list_empty(&accel_dev->crypto_list) &&
            test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
                qat_algs_unregister();
                qat_asym_algs_unregister();
        }
        clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (!test_bit(accel_dev->accel_id, service->start_status))
                        continue;
                ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
                if (!ret) {
                        clear_bit(accel_dev->accel_id, service->start_status);
                } else if (ret == -EAGAIN) {
                        wait = true;
                        clear_bit(accel_dev->accel_id, service->start_status);
                }
        }

        if (wait)
                msleep(100);

        if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
                if (adf_ae_stop(accel_dev))
                        dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
                else
                        clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
        }
}
EXPORT_SYMBOL_GPL(adf_dev_stop);

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;

        if (!hw_data) {
                dev_err(&GET_DEV(accel_dev),
                        "QAT: Failed to shutdown device - hw_data not set\n");
                return;
        }

        if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
                adf_ae_fw_release(accel_dev);
                clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
        }

        if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
                if (adf_ae_shutdown(accel_dev))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to shutdown Accel Engine\n");
                else
                        clear_bit(ADF_STATUS_AE_INITIALISED,
                                  &accel_dev->status);
        }

        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (!test_bit(accel_dev->accel_id, service->init_status))
                        continue;
                if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to shutdown service %s\n",
                                service->name);
                else
                        clear_bit(accel_dev->accel_id, service->init_status);
        }

        hw_data->disable_iov(accel_dev);

        if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
                hw_data->free_irq(accel_dev);
                clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
        }

        /* Delete configuration only if not restarting */
        if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
                adf_cfg_del_all(accel_dev);

        if (hw_data->exit_arb)
                hw_data->exit_arb(accel_dev);

        if (hw_data->exit_admin_comms)
                hw_data->exit_admin_comms(accel_dev);

        adf_cleanup_etr_data(accel_dev);
        adf_dev_restore(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);

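/* Notify all registered services that the device is about to restart. */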
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to restart service %s.\n",
                                service->name);
        }
        return 0;
}

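/* Notify all registered services that the device has finished restarting. */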
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to restart service %s.\n",
                                service->name);
        }
        return 0;
}

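/*
 * Stop and shut down the device while preserving the ServicesEnabled
 * configuration entry, so the device can be reconfigured and brought back
 * up with the same service selection.
 */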
int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
        char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
        int ret;

        ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
                                      ADF_SERVICES_ENABLED, services);

        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);

        if (!ret) {
                ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
                if (ret)
                        return ret;

                ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
                                                  ADF_SERVICES_ENABLED,
                                                  services, ADF_STR);
                if (ret)
                        return ret;
        }

        return 0;
}

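/**
 * adf_dev_down() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @reconfig:  True if the enabled-services configuration should be preserved
 *             for a later restart.
 *
 * Stops and shuts down the acceleration device under the device state lock.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */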
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
        int ret = 0;

        if (!accel_dev)
                return -EINVAL;

        mutex_lock(&accel_dev->state_lock);

        if (!adf_dev_started(accel_dev)) {
                dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
                         accel_dev->accel_id);
                ret = -EINVAL;
                goto out;
        }

        if (reconfig) {
                ret = adf_dev_shutdown_cache_cfg(accel_dev);
                goto out;
        }

        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);

out:
        mutex_unlock(&accel_dev->state_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);

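/**
 * adf_dev_up() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @config:    True if the device should be (re)configured before starting.
 *
 * Initializes and starts the acceleration device under the device state lock.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */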
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
        int ret = 0;

        if (!accel_dev)
                return -EINVAL;

        mutex_lock(&accel_dev->state_lock);

        if (adf_dev_started(accel_dev)) {
                dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
                         accel_dev->accel_id);
                ret = -EALREADY;
                goto out;
        }

        if (config && GET_HW_DATA(accel_dev)->dev_config) {
                ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
                if (unlikely(ret))
                        goto out;
        }

        ret = adf_dev_init(accel_dev);
        if (unlikely(ret))
                goto out;

        ret = adf_dev_start(accel_dev);

out:
        mutex_unlock(&accel_dev->state_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);