1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/mutex.h>
4 #include <linux/list.h>
5 #include <linux/bitops.h>
6 #include <linux/delay.h>
7 #include "adf_accel_devices.h"
8 #include "adf_cfg.h"
9 #include "adf_common_drv.h"
10 #include "adf_dbgfs.h"
11 #include "adf_heartbeat.h"
12
/* Global table of registered subservices; all accesses to the list itself
 * are serialized by service_lock (per-device status bitmaps inside each
 * service_hndl are manipulated with atomic bitops instead).
 */
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
15
/* Insert @service into the global service table under service_lock. */
static void adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}
22
/**
 * adf_service_register() - Register a subservice with the init framework
 * @service: Pointer to the service handle to register.
 *
 * Clears the per-device init/start status bitmaps and adds the service to
 * the global service table so that it receives device lifecycle events
 * (init/start/stop/shutdown/restart notifications).
 *
 * Return: 0 always (registration cannot fail).
 */
int adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}
30
/* Remove @service from the global service table under service_lock. */
static void adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}
37
adf_service_unregister(struct service_hndl * service)38 int adf_service_unregister(struct service_hndl *service)
39 {
40 int i;
41
42 for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
43 if (service->init_status[i] || service->start_status[i]) {
44 pr_err("QAT: Could not remove active service\n");
45 return -EFAULT;
46 }
47 }
48 adf_service_remove(service);
49 return 0;
50 }
51
/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * NOTE(review): the error paths below do not unwind the steps already
 * completed; presumably the caller is expected to tear down via
 * adf_dev_shutdown() — verify against adf_dev_up() callers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to init device - hw_data not set\n");
		return -EFAULT;
	}

	/* A PF must be configured before init; a VF may configure itself
	 * later, once PF/VF comms are up (see below).
	 */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
		return -EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
		return -EFAULT;
	}

	/* Generation-specific hooks are optional; skip when not provided. */
	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
		return -EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
		return -EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
		return -EFAULT;
	}

	if (hw_data->get_ring_to_svc_map)
		hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);

	if (adf_ae_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to initialise Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load acceleration FW\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	hw_data->enable_ints(accel_dev);
	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	/* A VF that was not configured up front configures itself here,
	 * now that PF/VF comms are available.
	 */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    accel_dev->is_vf) {
		if (qat_crypto_vf_dev_config(accel_dev))
			return -EFAULT;
	}

	adf_heartbeat_init(accel_dev);

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to initialise service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	return 0;
}
156
/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	int ret;

	/* STARTING is visible until the whole sequence succeeds; on failure
	 * it is left set (or re-set below) so adf_dev_stop() still runs.
	 */
	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
		return -EFAULT;
	}

	/* Optional generation-specific hooks below; each is skipped when the
	 * hw_data table does not provide it.
	 */
	if (hw_data->measure_clock) {
		ret = hw_data->measure_clock(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
			return ret;
		}
	}

	/* Set ssm watch dog timer */
	if (hw_data->set_ssm_wdtimer)
		hw_data->set_ssm_wdtimer(accel_dev);

	/* Enable Power Management */
	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
		return -EFAULT;
	}

	if (hw_data->start_timer) {
		ret = hw_data->start_timer(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
			return ret;
		}
	}

	adf_heartbeat_start(accel_dev);

	/* Second stage of the two-stage service bring-up (see adf_dev_init). */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to start service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/* Register crypto algorithms only if at least one crypto instance
	 * was configured; on failure flip the status bits back so the
	 * device is torn down as "starting", not "started".
	 */
	if (!list_empty(&accel_dev->crypto_list) &&
	    (qat_algs_register() || qat_asym_algs_register())) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register crypto algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	/* Likewise for compression instances. */
	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register compression algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	adf_dbgfs_add(accel_dev);

	return 0;
}
252
/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	bool wait = false;
	int ret;

	/* Nothing to do unless the device is started or mid-start. */
	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
		return;

	adf_dbgfs_rm(accel_dev);

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/* Unregister algorithms only if they were registered in
	 * adf_dev_start(); the status bit guards against a partial start.
	 */
	if (!list_empty(&accel_dev->crypto_list) &&
	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
		qat_algs_unregister();
		qat_asym_algs_unregister();
	}
	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	if (!list_empty(&accel_dev->compression_list) &&
	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
		qat_comp_algs_unregister();
	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	/* Send ADF_EVENT_STOP only to services started on this device.
	 * A service returning -EAGAIN asks for a grace period before the
	 * engines are stopped (see msleep below).
	 */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, service->start_status);
		} else if (ret == -EAGAIN) {
			wait = true;
			clear_bit(accel_dev->accel_id, service->start_status);
		}
	}

	if (hw_data->stop_timer)
		hw_data->stop_timer(accel_dev);

	/* Grace period for services that reported -EAGAIN above. */
	if (wait)
		msleep(100);

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}
}
318
/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	/* Each teardown step below is guarded by the status bit set by the
	 * matching init step, so a partially initialised device is unwound
	 * safely.
	 */
	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	/* Notify only services that were initialised on this device. */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown service %s\n",
				service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	adf_heartbeat_shutdown(accel_dev);

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	adf_dev_restore(accel_dev);
}
386
adf_dev_restarting_notify(struct adf_accel_dev * accel_dev)387 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
388 {
389 struct service_hndl *service;
390 struct list_head *list_itr;
391
392 list_for_each(list_itr, &service_table) {
393 service = list_entry(list_itr, struct service_hndl, list);
394 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
395 dev_err(&GET_DEV(accel_dev),
396 "Failed to restart service %s.\n",
397 service->name);
398 }
399 return 0;
400 }
401
adf_dev_restarted_notify(struct adf_accel_dev * accel_dev)402 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
403 {
404 struct service_hndl *service;
405 struct list_head *list_itr;
406
407 list_for_each(list_itr, &service_table) {
408 service = list_entry(list_itr, struct service_hndl, list);
409 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
410 dev_err(&GET_DEV(accel_dev),
411 "Failed to restart service %s.\n",
412 service->name);
413 }
414 return 0;
415 }
416
/* Stop and shut down the device while preserving the "ServicesEnabled"
 * configuration entry, which adf_dev_shutdown() would otherwise delete
 * along with the rest of the device configuration. The saved value is
 * written back afterwards so a subsequent adf_dev_up() can reconfigure
 * with the same services.
 */
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	/* Read the value before shutdown wipes the config tables. */
	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Restore only if the key existed before shutdown. */
	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}
442
/**
 * adf_dev_down() - Bring the given accel device down
 * @accel_dev: Pointer to acceleration device.
 * @reconfig: True to preserve the enabled-services configuration so the
 *	      device can be brought back up with the same settings.
 *
 * Stops and shuts down the device under the device state lock.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (reconfig) {
		ret = adf_dev_shutdown_cache_cfg(accel_dev);
	} else {
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}

	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);
465
/**
 * adf_dev_up() - Bring the given accel device up
 * @accel_dev: Pointer to acceleration device.
 * @config: True to (re)run the device-specific configuration hook before
 *	    initialising the device.
 *
 * Initialises and starts the device under the device state lock.
 *
 * NOTE(review): on adf_dev_init()/adf_dev_start() failure the partially
 * initialised state is not unwound here; presumably the caller follows up
 * with adf_dev_down() — verify against callers.
 *
 * Return: 0 on success, -EALREADY if already up, error code otherwise.
 */
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
			 accel_dev->accel_id);
		ret = -EALREADY;
		goto out;
	}

	if (config && GET_HW_DATA(accel_dev)->dev_config) {
		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
		if (unlikely(ret))
			goto out;
	}

	ret = adf_dev_init(accel_dev);
	if (unlikely(ret))
		goto out;

	ret = adf_dev_start(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);
499
adf_dev_restart(struct adf_accel_dev * accel_dev)500 int adf_dev_restart(struct adf_accel_dev *accel_dev)
501 {
502 int ret = 0;
503
504 if (!accel_dev)
505 return -EFAULT;
506
507 adf_dev_down(accel_dev, false);
508
509 ret = adf_dev_up(accel_dev, false);
510 /* if device is already up return success*/
511 if (ret == -EALREADY)
512 return 0;
513
514 return ret;
515 }
516 EXPORT_SYMBOL_GPL(adf_dev_restart);
517