1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Amir Hanania <amir.hanania@intel.com>
8  *  Haijun Liu <haijun.liu@mediatek.com>
9  *  Eliot Lee <eliot.lee@intel.com>
10  *  Moises Veleta <moises.veleta@intel.com>
11  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
12  *
13  * Contributors:
14  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
17  */
18 
19 #include <linux/atomic.h>
20 #include <linux/bitfield.h>
21 #include <linux/bitops.h>
22 #include <linux/device.h>
23 #include <linux/dma-direction.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/gfp.h>
26 #include <linux/err.h>
27 #include <linux/iopoll.h>
28 #include <linux/jiffies.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/list.h>
32 #include <linux/minmax.h>
33 #include <linux/mm.h>
34 #include <linux/netdevice.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/sched.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44 
45 #include "t7xx_dpmaif.h"
46 #include "t7xx_hif_dpmaif.h"
47 #include "t7xx_hif_dpmaif_rx.h"
48 #include "t7xx_pci.h"
49 
50 #define DPMAIF_BAT_COUNT		8192
51 #define DPMAIF_FRG_COUNT		4814
52 #define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)
53 
54 #define DPMAIF_BAT_CNT_THRESHOLD	30
55 #define DPMAIF_PIT_CNT_THRESHOLD	60
56 #define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
57 #define DPMAIF_NOTIFY_RELEASE_COUNT	128
58 #define DPMAIF_POLL_PIT_TIME_US		20
59 #define DPMAIF_POLL_PIT_MAX_TIME_US	2000
60 #define DPMAIF_WQ_TIME_LIMIT_MS		2
61 #define DPMAIF_CS_RESULT_PASS		0
62 
63 /* Packet type */
64 #define DES_PT_PD			0
65 #define DES_PT_MSG			1
66 /* Buffer type */
67 #define PKT_BUF_FRAG			1
68 
69 static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
70 {
71 	u32 value;
72 
73 	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
74 	value <<= 13;
75 	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
76 	return value;
77 }
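/*
 * Editor's note (illustrative, not from the original source): the buffer ID is
 * split across two PIT fields - a high part in pd.footer (PD_PIT_H_BID) and a
 * 13-bit low part in the header (PD_PIT_BUFFER_ID). For example, if the high
 * part decodes to 1 and the low part to 5, the BID is (1 << 13) + 5 = 8197.
 */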
78 
79 static int t7xx_dpmaif_net_rx_push_thread(void *arg)
80 {
81 	struct dpmaif_rx_queue *q = arg;
82 	struct dpmaif_ctrl *hif_ctrl;
83 	struct dpmaif_callbacks *cb;
84 
85 	hif_ctrl = q->dpmaif_ctrl;
86 	cb = hif_ctrl->callbacks;
87 
88 	while (!kthread_should_stop()) {
89 		struct sk_buff *skb;
90 		unsigned long flags;
91 
92 		if (skb_queue_empty(&q->skb_list)) {
93 			if (wait_event_interruptible(q->rx_wq,
94 						     !skb_queue_empty(&q->skb_list) ||
95 						     kthread_should_stop()))
96 				continue;
97 
98 			if (kthread_should_stop())
99 				break;
100 		}
101 
102 		spin_lock_irqsave(&q->skb_list.lock, flags);
103 		skb = __skb_dequeue(&q->skb_list);
104 		spin_unlock_irqrestore(&q->skb_list.lock, flags);
105 
106 		if (!skb)
107 			continue;
108 
109 		cb->recv_skb(hif_ctrl->t7xx_dev, skb);
110 		cond_resched();
111 	}
112 
113 	return 0;
114 }
115 
116 static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
117 					 const unsigned int q_num, const unsigned int bat_cnt)
118 {
119 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
120 	struct dpmaif_bat_request *bat_req = rxq->bat_req;
121 	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
122 
123 	if (!rxq->que_started) {
124 		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
125 		return -EINVAL;
126 	}
127 
128 	old_rl_idx = bat_req->bat_release_rd_idx;
129 	old_wr_idx = bat_req->bat_wr_idx;
130 	new_wr_idx = old_wr_idx + bat_cnt;
131 
132 	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
133 		goto err_flow;
134 
135 	if (new_wr_idx >= bat_req->bat_size_cnt) {
136 		new_wr_idx -= bat_req->bat_size_cnt;
137 		if (new_wr_idx >= old_rl_idx)
138 			goto err_flow;
139 	}
140 
141 	bat_req->bat_wr_idx = new_wr_idx;
142 	return 0;
143 
144 err_flow:
145 	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
146 	return -EINVAL;
147 }
148 
149 static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
150 					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
151 {
152 	dma_addr_t data_bus_addr;
153 	struct sk_buff *skb;
154 
155 	skb = __dev_alloc_skb(size, GFP_KERNEL);
156 	if (!skb)
157 		return false;
158 
159 	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
160 	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
161 		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
162 		dev_kfree_skb_any(skb);
163 		return false;
164 	}
165 
166 	cur_skb->skb = skb;
167 	cur_skb->data_bus_addr = data_bus_addr;
168 	cur_skb->data_len = size;
169 
170 	return true;
171 }
172 
173 static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
174 			       unsigned int index)
175 {
176 	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
177 
178 	if (bat_skb->skb) {
179 		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
180 		dev_kfree_skb(bat_skb->skb);
181 		bat_skb->skb = NULL;
182 	}
183 }
184 
185 /**
186  * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
187  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
188  * @bat_req: Pointer to BAT request structure.
189  * @q_num: Queue number.
190  * @buf_cnt: Number of buffers to allocate.
191  * @initial: Indicates if the ring is being populated for the first time.
192  *
193  * Allocate skb and store the start address of the data buffer into the BAT ring.
194  * If this is not the initial call, notify the HW about the new entries.
195  *
196  * Return:
197  * * 0		- Success.
198  * * -ERROR	- Error code.
199  */
200 int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
201 			     const struct dpmaif_bat_request *bat_req,
202 			     const unsigned int q_num, const unsigned int buf_cnt,
203 			     const bool initial)
204 {
205 	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
206 	int ret;
207 
208 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
209 		return -EINVAL;
210 
211 	/* Check BAT buffer space */
212 	bat_max_cnt = bat_req->bat_size_cnt;
213 
214 	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
215 					    bat_req->bat_wr_idx, DPMAIF_WRITE);
216 	if (buf_cnt > bat_cnt)
217 		return -ENOMEM;
218 
219 	bat_start_idx = bat_req->bat_wr_idx;
220 
221 	for (i = 0; i < buf_cnt; i++) {
222 		unsigned int cur_bat_idx = bat_start_idx + i;
223 		struct dpmaif_bat_skb *cur_skb;
224 		struct dpmaif_bat *cur_bat;
225 
226 		if (cur_bat_idx >= bat_max_cnt)
227 			cur_bat_idx -= bat_max_cnt;
228 
229 		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
230 		if (!cur_skb->skb &&
231 		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
232 			break;
233 
234 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
235 		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
236 		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
237 	}
238 
239 	if (!i)
240 		return -ENOMEM;
241 
242 	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
243 	if (ret)
244 		goto err_unmap_skbs;
245 
246 	if (!initial) {
247 		unsigned int hw_wr_idx;
248 
249 		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
250 		if (ret)
251 			goto err_unmap_skbs;
252 
253 		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
254 							  DPF_RX_QNO_DFT);
255 		if (hw_wr_idx != bat_req->bat_wr_idx) {
256 			ret = -EFAULT;
257 			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
258 			goto err_unmap_skbs;
259 		}
260 	}
261 
262 	return 0;
263 
264 err_unmap_skbs:
265 	while (--i > 0)
266 		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
267 
268 	return ret;
269 }
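/*
 * Usage sketch (illustrative only; it mirrors t7xx_dpmaif_bat_release_and_add()
 * later in this file): once 'bid_cnt' BAT entries have been released, the ring
 * is refilled and the HW notified in a single call:
 *
 *	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req,
 *				       rxq->index, bid_cnt, false);
 *	if (ret < 0)
 *		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
 */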
270 
271 static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
272 					  const unsigned int rel_entry_num)
273 {
274 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
275 	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
276 	int ret;
277 
278 	if (!rxq->que_started)
279 		return 0;
280 
281 	if (rel_entry_num >= rxq->pit_size_cnt) {
282 		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
283 		return -EINVAL;
284 	}
285 
286 	old_rel_idx = rxq->pit_release_rd_idx;
287 	new_rel_idx = old_rel_idx + rel_entry_num;
288 	hw_wr_idx = rxq->pit_wr_idx;
289 	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
290 		new_rel_idx -= rxq->pit_size_cnt;
291 
292 	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
293 	if (ret) {
294 		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
295 		return ret;
296 	}
297 
298 	rxq->pit_release_rd_idx = new_rel_idx;
299 	return 0;
300 }
301 
302 static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
303 {
304 	unsigned long flags;
305 
306 	spin_lock_irqsave(&bat_req->mask_lock, flags);
307 	set_bit(idx, bat_req->bat_bitmap);
308 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
309 }
310 
311 static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
312 				       const unsigned int cur_bid)
313 {
314 	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
315 	struct dpmaif_bat_page *bat_page;
316 
317 	if (cur_bid >= DPMAIF_FRG_COUNT)
318 		return -EINVAL;
319 
320 	bat_page = bat_frag->bat_skb + cur_bid;
321 	if (!bat_page->page)
322 		return -EINVAL;
323 
324 	return 0;
325 }
326 
327 static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
328 				unsigned int index)
329 {
330 	struct dpmaif_bat_page *bat_page = bat_page_base + index;
331 
332 	if (bat_page->page) {
333 		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
334 		put_page(bat_page->page);
335 		bat_page->page = NULL;
336 	}
337 }
338 
339 /**
340  * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring.
341  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
342  * @bat_req: Pointer to BAT request structure.
343  * @buf_cnt: Number of buffers to allocate.
344  * @initial: Indicates if the ring is being populated for the first time.
345  *
346  * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
347  * This function allocates a page fragment and stores the start address of the page
348  * into the Fragment BAT ring.
349  * If this is not the initial call, notify the HW about the new entries.
350  *
351  * Return:
352  * * 0		- Success.
353  * * -ERROR	- Error code.
354  */
355 int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
356 			      const unsigned int buf_cnt, const bool initial)
357 {
358 	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
359 	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
360 	int ret = 0, i;
361 
362 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
363 		return -EINVAL;
364 
365 	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
366 					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
367 					      DPMAIF_WRITE);
368 	if (buf_cnt > buf_space) {
369 		dev_err(dpmaif_ctrl->dev,
370 			"Requested more buffers than the space available in RX frag ring\n");
371 		return -EINVAL;
372 	}
373 
374 	for (i = 0; i < buf_cnt; i++) {
375 		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
376 		struct dpmaif_bat *cur_bat;
377 		dma_addr_t data_base_addr;
378 
379 		if (!cur_page->page) {
380 			unsigned long offset;
381 			struct page *page;
382 			void *data;
383 
384 			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
385 			if (!data)
386 				break;
387 
388 			page = virt_to_head_page(data);
389 			offset = data - page_address(page);
390 
391 			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
392 						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
393 			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
394 				put_page(virt_to_head_page(data));
395 				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
396 				break;
397 			}
398 
399 			cur_page->page = page;
400 			cur_page->data_bus_addr = data_base_addr;
401 			cur_page->offset = offset;
402 			cur_page->data_len = bat_req->pkt_buf_sz;
403 		}
404 
405 		data_base_addr = cur_page->data_bus_addr;
406 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
407 		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
408 		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
409 		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
410 	}
411 
412 	bat_req->bat_wr_idx = cur_bat_idx;
413 
414 	if (!initial)
415 		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
416 
417 	if (i < buf_cnt) {
418 		ret = -ENOMEM;
419 		if (initial) {
420 			while (--i > 0)
421 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
422 		}
423 	}
424 
425 	return ret;
426 }
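/*
 * Usage sketch (illustrative only; it mirrors t7xx_dpmaif_frag_bat_release_and_add()
 * later in this file): released fragment BAT entries are refilled the same way,
 * with 'initial' set to false so the new entries are reported to the HW:
 *
 *	ret = t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag,
 *					bid_cnt, false);
 */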
427 
428 static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
429 				       const struct dpmaif_pit *pkt_info,
430 				       struct sk_buff *skb)
431 {
432 	unsigned long long data_bus_addr, data_base_addr;
433 	struct device *dev = rxq->dpmaif_ctrl->dev;
434 	struct dpmaif_bat_page *page_info;
435 	unsigned int data_len;
436 	int data_offset;
437 
438 	page_info = rxq->bat_frag->bat_skb;
439 	page_info += t7xx_normal_pit_bid(pkt_info);
440 	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
441 
442 	if (!page_info->page)
443 		return -EINVAL;
444 
445 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
446 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
447 	data_base_addr = page_info->data_bus_addr;
448 	data_offset = data_bus_addr - data_base_addr;
449 	data_offset += page_info->offset;
450 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
451 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
452 			data_offset, data_len, page_info->data_len);
453 
454 	page_info->page = NULL;
455 	page_info->offset = 0;
456 	page_info->data_len = 0;
457 	return 0;
458 }
459 
460 static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
461 				const struct dpmaif_pit *pkt_info,
462 				const struct dpmaif_cur_rx_skb_info *skb_info)
463 {
464 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
465 	int ret;
466 
467 	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
468 	if (ret < 0)
469 		return ret;
470 
471 	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
472 	if (ret < 0) {
473 		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
474 		return ret;
475 	}
476 
477 	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
478 	return 0;
479 }
480 
481 static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
482 {
483 	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
484 
485 	bat_skb += cur_bid;
486 	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
487 		return -EINVAL;
488 
489 	return 0;
490 }
491 
492 static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
493 {
494 	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
495 }
496 
497 static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
498 				     const struct dpmaif_pit *pit)
499 {
500 	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
501 
502 	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
503 				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
504 				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
505 		return -EFAULT;
506 
507 	rxq->expect_pit_seq++;
508 	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
509 		rxq->expect_pit_seq = 0;
510 
511 	return 0;
512 }
513 
514 static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
515 {
516 	unsigned int zero_index;
517 	unsigned long flags;
518 
519 	spin_lock_irqsave(&bat_req->mask_lock, flags);
520 
521 	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
522 					bat_req->bat_release_rd_idx);
523 
524 	if (zero_index < bat_req->bat_size_cnt) {
525 		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
526 		return zero_index - bat_req->bat_release_rd_idx;
527 	}
528 
529 	/* Limit the search up to bat_release_rd_idx */
530 	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
531 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
532 	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
533 }
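/*
 * Worked example (illustrative only): with bat_size_cnt = 8 and
 * bat_release_rd_idx = 6, a bitmap of 0b11000111 (bit 0 rightmost) has no zero
 * bit in [6, 8), so the wrapped search finds bit 3 and the function returns
 * 8 - 6 + 3 = 5 releasable entries (indices 6, 7, 0, 1, 2).
 */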
534 
535 static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
536 					 const unsigned int rel_entry_num,
537 					 const enum bat_type buf_type)
538 {
539 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
540 	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
541 	struct dpmaif_bat_request *bat;
542 	unsigned long flags;
543 
544 	if (!rxq->que_started || !rel_entry_num)
545 		return -EINVAL;
546 
547 	if (buf_type == BAT_TYPE_FRAG) {
548 		bat = rxq->bat_frag;
549 		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
550 	} else {
551 		bat = rxq->bat_req;
552 		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
553 	}
554 
555 	if (rel_entry_num >= bat->bat_size_cnt)
556 		return -EINVAL;
557 
558 	old_rel_idx = bat->bat_release_rd_idx;
559 	new_rel_idx = old_rel_idx + rel_entry_num;
560 
561 	/* No need to release anything if the queue is empty */
562 	if (bat->bat_wr_idx == old_rel_idx)
563 		return 0;
564 
565 	if (hw_rd_idx >= old_rel_idx) {
566 		if (new_rel_idx > hw_rd_idx)
567 			return -EINVAL;
568 	}
569 
570 	if (new_rel_idx >= bat->bat_size_cnt) {
571 		new_rel_idx -= bat->bat_size_cnt;
572 		if (new_rel_idx > hw_rd_idx)
573 			return -EINVAL;
574 	}
575 
576 	spin_lock_irqsave(&bat->mask_lock, flags);
577 	for (i = 0; i < rel_entry_num; i++) {
578 		unsigned int index = bat->bat_release_rd_idx + i;
579 
580 		if (index >= bat->bat_size_cnt)
581 			index -= bat->bat_size_cnt;
582 
583 		clear_bit(index, bat->bat_bitmap);
584 	}
585 	spin_unlock_irqrestore(&bat->mask_lock, flags);
586 
587 	bat->bat_release_rd_idx = new_rel_idx;
588 	return rel_entry_num;
589 }
590 
591 static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
592 {
593 	int ret;
594 
595 	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
596 		return 0;
597 
598 	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
599 	if (ret)
600 		return ret;
601 
602 	rxq->pit_remain_release_cnt = 0;
603 	return 0;
604 }
605 
606 static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
607 {
608 	unsigned int bid_cnt;
609 	int ret;
610 
611 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
612 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
613 		return 0;
614 
615 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
616 	if (ret <= 0) {
617 		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
618 		return ret;
619 	}
620 
621 	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
622 	if (ret < 0)
623 		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
624 
625 	return ret;
626 }
627 
628 static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
629 {
630 	unsigned int bid_cnt;
631 	int ret;
632 
633 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
634 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
635 		return 0;
636 
637 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
638 	if (ret <= 0) {
639 		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
640 		return ret;
641 	}
642 
643 	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
644 }
645 
646 static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
647 				      const struct dpmaif_pit *msg_pit,
648 				      struct dpmaif_cur_rx_skb_info *skb_info)
649 {
650 	int header = le32_to_cpu(msg_pit->header);
651 
652 	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
653 	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
654 	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
655 	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
656 }
657 
658 static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
659 				       const struct dpmaif_pit *pkt_info,
660 				       struct dpmaif_cur_rx_skb_info *skb_info)
661 {
662 	unsigned long long data_bus_addr, data_base_addr;
663 	struct device *dev = rxq->dpmaif_ctrl->dev;
664 	struct dpmaif_bat_skb *bat_skb;
665 	unsigned int data_len;
666 	struct sk_buff *skb;
667 	int data_offset;
668 
669 	bat_skb = rxq->bat_req->bat_skb;
670 	bat_skb += t7xx_normal_pit_bid(pkt_info);
671 	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
672 
673 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
674 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
675 	data_base_addr = bat_skb->data_bus_addr;
676 	data_offset = data_bus_addr - data_base_addr;
677 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
678 	skb = bat_skb->skb;
679 	skb->len = 0;
680 	skb_reset_tail_pointer(skb);
681 	skb_reserve(skb, data_offset);
682 
683 	if (skb->tail + data_len > skb->end) {
684 		dev_err(dev, "No buffer space available\n");
685 		return -ENOBUFS;
686 	}
687 
688 	skb_put(skb, data_len);
689 	skb_info->cur_skb = skb;
690 	bat_skb->skb = NULL;
691 	return 0;
692 }
693 
694 static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
695 				  const struct dpmaif_pit *pkt_info,
696 				  struct dpmaif_cur_rx_skb_info *skb_info)
697 {
698 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
699 	int ret;
700 
701 	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
702 	if (ret < 0)
703 		return ret;
704 
705 	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
706 	if (ret < 0) {
707 		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
708 		return ret;
709 	}
710 
711 	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
712 	return 0;
713 }
714 
715 static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
716 {
717 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
718 	int ret;
719 
720 	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
721 
722 	ret = t7xx_dpmaif_pit_release_and_add(rxq);
723 	if (ret < 0)
724 		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
725 
726 	return ret;
727 }
728 
729 static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
730 {
731 	unsigned long flags;
732 
733 	spin_lock_irqsave(&rxq->skb_list.lock, flags);
734 	if (rxq->skb_list.qlen < rxq->skb_list_max_len)
735 		__skb_queue_tail(&rxq->skb_list, skb);
736 	else
737 		dev_kfree_skb_any(skb);
738 	spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
739 }
740 
741 static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
742 			       struct dpmaif_cur_rx_skb_info *skb_info)
743 {
744 	struct sk_buff *skb = skb_info->cur_skb;
745 	struct t7xx_skb_cb *skb_cb;
746 	u8 netif_id;
747 
748 	skb_info->cur_skb = NULL;
749 
750 	if (skb_info->pit_dp) {
751 		dev_kfree_skb_any(skb);
752 		return;
753 	}
754 
755 	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
756 									CHECKSUM_NONE;
757 	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
758 	skb_cb = T7XX_SKB_CB(skb);
759 	skb_cb->netif_idx = netif_id;
760 	skb_cb->rx_pkt_type = skb_info->pkt_type;
761 	t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
762 }
763 
764 static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
765 				const unsigned long timeout)
766 {
767 	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
768 	struct device *dev = rxq->dpmaif_ctrl->dev;
769 	struct dpmaif_cur_rx_skb_info *skb_info;
770 	int ret = 0;
771 
772 	pit_len = rxq->pit_size_cnt;
773 	skb_info = &rxq->rx_data_info;
774 	cur_pit = rxq->pit_rd_idx;
775 
776 	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
777 		struct dpmaif_pit *pkt_info;
778 		u32 val;
779 
780 		if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
781 			break;
782 
783 		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
784 		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
785 			dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
786 			return -EAGAIN;
787 		}
788 
789 		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
790 		if (val == DES_PT_MSG) {
791 			if (skb_info->msg_pit_received)
792 				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
793 
794 			skb_info->msg_pit_received = true;
795 			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
796 		} else { /* DES_PT_PD */
797 			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
798 			if (val != PKT_BUF_FRAG)
799 				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
800 			else if (!skb_info->cur_skb)
801 				ret = -EINVAL;
802 			else
803 				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
804 
805 			if (ret < 0) {
806 				skb_info->err_payload = 1;
807 				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
808 			}
809 
810 			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
811 			if (!val) {
812 				if (!skb_info->err_payload) {
813 					t7xx_dpmaif_rx_skb(rxq, skb_info);
814 				} else if (skb_info->cur_skb) {
815 					dev_kfree_skb_any(skb_info->cur_skb);
816 					skb_info->cur_skb = NULL;
817 				}
818 
819 				memset(skb_info, 0, sizeof(*skb_info));
820 
821 				recv_skb_cnt++;
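				/* DPMAIF_RX_PUSH_THRESHOLD_MASK is GENMASK(2, 0):
				 * wake the push thread once every 8 completed SKBs.
				 */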
822 				if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
823 					wake_up_all(&rxq->rx_wq);
824 					recv_skb_cnt = 0;
825 				}
826 			}
827 		}
828 
829 		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
830 		rxq->pit_rd_idx = cur_pit;
831 		rxq->pit_remain_release_cnt++;
832 
833 		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
834 			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
835 			if (ret < 0)
836 				break;
837 		}
838 	}
839 
840 	if (recv_skb_cnt)
841 		wake_up_all(&rxq->rx_wq);
842 
843 	if (!ret)
844 		ret = t7xx_dpmaifq_rx_notify_hw(rxq);
845 
846 	if (ret)
847 		return ret;
848 
849 	return rx_cnt;
850 }
851 
852 static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
853 {
854 	unsigned int hw_wr_idx, pit_cnt;
855 
856 	if (!rxq->que_started)
857 		return 0;
858 
859 	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
860 	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
861 					    DPMAIF_READ);
862 	rxq->pit_wr_idx = hw_wr_idx;
863 	return pit_cnt;
864 }
865 
866 static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
867 				       const unsigned int q_num, const unsigned int budget)
868 {
869 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
870 	unsigned long time_limit;
871 	unsigned int cnt;
872 
873 	time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);
874 
875 	while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
876 		unsigned int rd_cnt;
877 		int real_cnt;
878 
879 		rd_cnt = min(cnt, budget);
880 
881 		real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
882 		if (real_cnt < 0)
883 			return real_cnt;
884 
885 		if (real_cnt < cnt)
886 			return -EAGAIN;
887 	}
888 
889 	return 0;
890 }
891 
892 static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
893 {
894 	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
895 	int ret;
896 
897 	ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
898 	if (ret < 0) {
899 		/* Try one more time */
900 		queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
901 		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
902 	} else {
903 		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
904 		t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
905 	}
906 }
907 
908 static void t7xx_dpmaif_rxq_work(struct work_struct *work)
909 {
910 	struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
911 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
912 	int ret;
913 
914 	atomic_set(&rxq->rx_processing, 1);
916 	/* Ensure rx_processing is set to 1 before the RX flow actually begins */
916 	smp_mb();
917 
918 	if (!rxq->que_started) {
919 		atomic_set(&rxq->rx_processing, 0);
920 		dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
921 		return;
922 	}
923 
924 	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
925 	if (ret < 0 && ret != -EACCES)
926 		return;
927 
928 	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
929 	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
930 		t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
931 
932 	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
933 	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
934 	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
935 	atomic_set(&rxq->rx_processing, 0);
936 }
937 
938 void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
939 {
940 	struct dpmaif_rx_queue *rxq;
941 	int qno;
942 
943 	qno = ffs(que_mask) - 1;
944 	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
945 		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
946 		return;
947 	}
948 
949 	rxq = &dpmaif_ctrl->rxq[qno];
950 	queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
951 }
952 
953 static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
954 				  const struct dpmaif_bat_request *bat_req)
955 {
956 	if (bat_req->bat_base)
957 		dma_free_coherent(dpmaif_ctrl->dev,
958 				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
959 				  bat_req->bat_base, bat_req->bat_bus_addr);
960 }
961 
962 /**
963  * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
964  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
965  * @bat_req: Pointer to BAT request structure.
966  * @buf_type: BAT ring type.
967  *
968  * This function allocates the BAT ring buffer shared with the HW device, also allocates
969  * a buffer used to store information about the BAT skbs for further release.
970  *
971  * Return:
972  * * 0		- Success.
973  * * -ERROR	- Error code.
974  */
975 int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
976 			  const enum bat_type buf_type)
977 {
978 	int sw_buf_size;
979 
980 	if (buf_type == BAT_TYPE_FRAG) {
981 		sw_buf_size = sizeof(struct dpmaif_bat_page);
982 		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
983 		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
984 	} else {
985 		sw_buf_size = sizeof(struct dpmaif_bat_skb);
986 		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
987 		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
988 	}
989 
990 	bat_req->type = buf_type;
991 	bat_req->bat_wr_idx = 0;
992 	bat_req->bat_release_rd_idx = 0;
993 
994 	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
995 					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
996 					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
997 	if (!bat_req->bat_base)
998 		return -ENOMEM;
999 
1000 	/* For AP SW to record skb information */
1001 	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
1002 					GFP_KERNEL);
1003 	if (!bat_req->bat_skb)
1004 		goto err_free_dma_mem;
1005 
1006 	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
1007 	if (!bat_req->bat_bitmap)
1008 		goto err_free_dma_mem;
1009 
1010 	spin_lock_init(&bat_req->mask_lock);
1011 	atomic_set(&bat_req->refcnt, 0);
1012 	return 0;
1013 
1014 err_free_dma_mem:
1015 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
1016 
1017 	return -ENOMEM;
1018 }
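/*
 * Initialization sketch (illustrative only; the real call sites live in the
 * DPMAIF HIF setup code outside this file, and 'buf_cnt' here is a
 * hypothetical count below bat_size_cnt): the ring is allocated first and then
 * populated with 'initial' set to true so the HW is not notified yet:
 *
 *	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
 *	if (!ret)
 *		ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req,
 *					       0, buf_cnt, true);
 */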
1019 
1020 void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
1021 {
1022 	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
1023 		return;
1024 
1025 	bitmap_free(bat_req->bat_bitmap);
1026 	bat_req->bat_bitmap = NULL;
1027 
1028 	if (bat_req->bat_skb) {
1029 		unsigned int i;
1030 
1031 		for (i = 0; i < bat_req->bat_size_cnt; i++) {
1032 			if (bat_req->type == BAT_TYPE_FRAG)
1033 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1034 			else
1035 				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1036 		}
1037 	}
1038 
1039 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
1040 }
1041 
1042 static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
1043 {
1044 	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
1045 	rxq->pit_rd_idx = 0;
1046 	rxq->pit_wr_idx = 0;
1047 	rxq->pit_release_rd_idx = 0;
1048 	rxq->expect_pit_seq = 0;
1049 	rxq->pit_remain_release_cnt = 0;
1050 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1051 
1052 	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
1053 					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1054 					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
1055 	if (!rxq->pit_base)
1056 		return -ENOMEM;
1057 
1058 	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
1059 	atomic_inc(&rxq->bat_req->refcnt);
1060 
1061 	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
1062 	atomic_inc(&rxq->bat_frag->refcnt);
1063 	return 0;
1064 }
1065 
1066 static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
1067 {
1068 	if (!rxq->dpmaif_ctrl)
1069 		return;
1070 
1071 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
1072 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
1073 
1074 	if (rxq->pit_base)
1075 		dma_free_coherent(rxq->dpmaif_ctrl->dev,
1076 				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1077 				  rxq->pit_base, rxq->pit_bus_addr);
1078 }
1079 
1080 int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
1081 {
1082 	int ret;
1083 
1084 	ret = t7xx_dpmaif_rx_alloc(queue);
1085 	if (ret < 0) {
1086 		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
1087 		return ret;
1088 	}
1089 
1090 	INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);
1091 
1092 	queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
1093 					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
1094 	if (!queue->worker) {
1095 		ret = -ENOMEM;
1096 		goto err_free_rx_buffer;
1097 	}
1098 
1099 	init_waitqueue_head(&queue->rx_wq);
1100 	skb_queue_head_init(&queue->skb_list);
1101 	queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
1102 	queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
1103 				       queue, "dpmaif_rx%d_push", queue->index);
1104 
1105 	ret = PTR_ERR_OR_ZERO(queue->rx_thread);
1106 	if (ret)
1107 		goto err_free_workqueue;
1108 
1109 	return 0;
1110 
1111 err_free_workqueue:
1112 	destroy_workqueue(queue->worker);
1113 
1114 err_free_rx_buffer:
1115 	t7xx_dpmaif_rx_buf_free(queue);
1116 
1117 	return ret;
1118 }
1119 
1120 void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
1121 {
1122 	if (queue->worker)
1123 		destroy_workqueue(queue->worker);
1124 
1125 	if (queue->rx_thread)
1126 		kthread_stop(queue->rx_thread);
1127 
1128 	skb_queue_purge(&queue->skb_list);
1129 	t7xx_dpmaif_rx_buf_free(queue);
1130 }
1131 
1132 static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
1133 {
1134 	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
1135 	struct dpmaif_rx_queue *rxq;
1136 	int ret;
1137 
1138 	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
1139 	if (ret < 0 && ret != -EACCES)
1140 		return;
1141 
1142 	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
1143 
1144 	/* All RX queues share one BAT table, so choose DPF_RX_QNO_DFT */
1145 	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
1146 	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
1147 		t7xx_dpmaif_bat_release_and_add(rxq);
1148 		t7xx_dpmaif_frag_bat_release_and_add(rxq);
1149 	}
1150 
1151 	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
1152 	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
1153 	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
1154 }
1155 
1156 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
1157 {
1158 	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
1159 						      WQ_MEM_RECLAIM, 1);
1160 	if (!dpmaif_ctrl->bat_release_wq)
1161 		return -ENOMEM;
1162 
1163 	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
1164 	return 0;
1165 }
1166 
1167 void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
1168 {
1169 	flush_work(&dpmaif_ctrl->bat_release_work);
1170 
1171 	if (dpmaif_ctrl->bat_release_wq) {
1172 		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
1173 		dpmaif_ctrl->bat_release_wq = NULL;
1174 	}
1175 }
1176 
1177 /**
1178  * t7xx_dpmaif_rx_stop() - Suspend RX flow.
1179  * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
1180  *
1181  * Wait for all the RX work to finish executing and mark the RX queue as paused.
1182  */
1183 void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
1184 {
1185 	unsigned int i;
1186 
1187 	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
1188 		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
1189 		int timeout, value;
1190 
1191 		flush_work(&rxq->dpmaif_rxq_work);
1192 
1193 		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
1194 						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
1195 		if (timeout)
1196 			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
1197 
1198 		/* Ensure RX processing has stopped before we set rxq->que_started to false */
1199 		smp_mb();
1200 		rxq->que_started = false;
1201 	}
1202 }
1203 
1204 static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
1205 {
1206 	int cnt, j = 0;
1207 
1208 	flush_work(&rxq->dpmaif_rxq_work);
1209 	rxq->que_started = false;
1210 
1211 	do {
1212 		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
1213 						rxq->pit_wr_idx, DPMAIF_READ);
1214 
1215 		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
1216 			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
1217 			break;
1218 		}
1219 	} while (cnt);
1220 
1221 	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
1222 	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
1223 	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
1224 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1225 
1226 	rxq->pit_rd_idx = 0;
1227 	rxq->pit_wr_idx = 0;
1228 	rxq->pit_release_rd_idx = 0;
1229 	rxq->expect_pit_seq = 0;
1230 	rxq->pit_remain_release_cnt = 0;
1231 	rxq->bat_req->bat_release_rd_idx = 0;
1232 	rxq->bat_req->bat_wr_idx = 0;
1233 	rxq->bat_frag->bat_release_rd_idx = 0;
1234 	rxq->bat_frag->bat_wr_idx = 0;
1235 }
1236 
1237 void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
1238 {
1239 	int i;
1240 
1241 	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
1242 		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
1243 }
1244