// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

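/* Fast modulo for power-of-two ring sizes: returns data % (1 << shift). */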
static inline u32 adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

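/* The DMA base of a ring must be naturally aligned to the ring size. */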
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return -EFAULT;
	return 0;
}

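/*
 * Find the ring-size encoding whose byte size matches msg_size * msg_num
 * exactly; fall back to the default encoding if none does.
 */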
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}

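/*
 * Ring IRQs are masked and unmasked through the bank-level interrupt
 * coalescing CSRs, so each change writes the bank's whole irq_mask.
 */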
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
			      bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

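/*
 * Reserve an in-flight slot, copy the request at the current tail and
 * advance the tail doorbell. Returns -EAGAIN when the ring is full.
 */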
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring->tail);
	spin_unlock_bh(&ring->lock);
	return 0;
}

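/*
 * Drain completed responses from the head of the ring: invoke the ring's
 * callback for each message, restore the empty signature, then publish
 * the new head to hardware once at the end.
 */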
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
				    ring->bank->bank_number,
				    ring->ring_number, ring->head);
	return 0;
}

static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}

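/*
 * Response (RX) rings additionally program near-watermark levels (512
 * and 0 here); per the macro naming these appear to map to near-full
 * and near-empty thresholds respectively.
 */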
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}

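/*
 * Allocate the ring's DMA buffer and program the config and base CSRs.
 * The buffer is filled with 0x7F bytes, which form ADF_RING_EMPTY_SIG
 * words so the response handler sees the ring as empty.
 */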
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u64 ring_base;
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		ring->base_addr = NULL;
		return -EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}

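/*
 * Look up the ring number for ring_name in the given config section,
 * reserve and initialize that ring in bank bank_num, and enable its
 * interrupt unless poll_mode is requested.
 */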
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    u32 bank_num, u32 num_msgs,
		    u32 msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
		return -EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		dev_err(&GET_DEV(accel_dev),
			"Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
			section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
		return -EFAULT;
	}
	if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
		return -EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
			ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring's config and base CSRs */
	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
			      ring->ring_number, 0);
	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
			    ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

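/*
 * E_STAT has a bit set for every empty ring, so ~empty_rings & irq_mask
 * yields the IRQ-enabled rings that have responses pending.
 */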
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	u32 empty_rings, i;

	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i]);
	}
}

void adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;

	/* Handle all the responses and re-enable IRQs */
	adf_ring_response_handler(bank);
	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
				   bank->irq_mask);
}

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  u32 key, u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}

static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
				  const char *section,
				  u32 bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

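/*
 * TX rings own their in-flight counters; each RX ring shares the counter
 * of its paired TX ring, located tx_rx_gap rings earlier in the bank.
 */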
static int adf_init_bank(struct adf_accel_dev *accel_dev,
			 struct adf_etr_bank_data *bank,
			 u32 bank_num, void __iomem *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;

	memset(bank, 0, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	spin_lock_init(&bank->lock);

	/* Always enable IRQ coalescing, so the optimised flag-and-coalesce
	 * register can be used. If coalescing is disabled in the config
	 * file, just use the minimum time value.
	 */
	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
			     &coalesc_enabled) == 0) && coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
				kzalloc_node(sizeof(atomic_t),
					     GFP_KERNEL,
					     dev_to_node(&GET_DEV(accel_dev)));
			if (!ring->inflights)
				goto err;
		} else {
			if (i < hw_data->tx_rx_gap) {
				dev_err(&GET_DEV(accel_dev),
					"Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}
	if (adf_bank_debugfs_add(bank)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add bank debugfs entry\n");
		goto err;
	}

	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
				       dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);

	for (i = 0; i < num_banks; i++) {
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

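/*
 * Free a bank's ring DMA buffers and TX in-flight counters, then remove
 * its debugfs entries and clear the bank structure.
 */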
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		struct adf_accel_dev *accel_dev = bank->accel_dev;
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);