1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
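
/* Illustrative ring arithmetic for the two helpers above (the values are
 * hypothetical, chosen only to show the wrap-around handling): with
 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 4, stmmac_tx_avail()
 * returns 512 - 10 + 4 - 1 = 505 free descriptors; with dma_rx_size = 512,
 * cur_rx = 10 and dirty_rx = 4, stmmac_rx_dirty() returns 10 - 4 = 6
 * descriptors waiting to be refilled.
 */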
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function checks whether all TX queues have finished
407  * their work and, if so, enters LPI mode for EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in LPI state,
452  *  then the MAC transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function reads the timestamp from the descriptor, performs some
527  * sanity checks, and passes it to the stack.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function reads the received packet's timestamp from the descriptor
570  * and passes it to the stack. It also performs some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing(TX)
607  *  and incoming(RX) packets time stamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate -ve integer on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.AS1 any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.AS1, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.AS1, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtains the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * It will be rerun after resuming from suspend, in which case the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	if (!priv->plat->clk_ptp_rate) {
839 		netdev_err(priv->dev, "Invalid PTP clock rate");
840 		return -EINVAL;
841 	}
842 
843 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
844 	priv->systime_flags = systime_flags;
845 
846 	/* program Sub Second Increment reg */
847 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
848 					   priv->plat->clk_ptp_rate,
849 					   xmac, &sec_inc);
850 	temp = div_u64(1000000000ULL, sec_inc);
851 
852 	/* Store sub second increment for later use */
853 	priv->sub_second_inc = sec_inc;
854 
855 	/* calculate default added value:
856 	 * the formula is:
857 	 * addend = (2^32)/freq_div_ratio;
858 	 * where freq_div_ratio = 1e9ns/sec_inc
859 	 */
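	/* Illustrative numbers (hypothetical rates, not taken from any real
	 * platform): with sec_inc = 20ns the divider frequency is
	 * 1e9 / 20 = 50MHz; if clk_ptp_rate were 62.5MHz, the computation
	 * below would give addend = (50e6 << 32) / 62.5e6 ~= 0xCCCCCCCC.
	 */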
860 	temp = (u64)(temp << 32);
861 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
862 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
863 
864 	/* initialize system time */
865 	ktime_get_real_ts64(&now);
866 
867 	/* lower 32 bits of tv_sec are safe until y2106 */
868 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
869 
870 	return 0;
871 }
872 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
873 
874 /**
875  * stmmac_init_ptp - init PTP
876  * @priv: driver private structure
877  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
878  * This is done by looking at the HW cap. register.
879  * This function also registers the ptp driver.
880  */
881 static int stmmac_init_ptp(struct stmmac_priv *priv)
882 {
883 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
884 	int ret;
885 
886 	if (priv->plat->ptp_clk_freq_config)
887 		priv->plat->ptp_clk_freq_config(priv);
888 
889 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
890 	if (ret)
891 		return ret;
892 
893 	priv->adv_ts = 0;
894 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
895 	if (xmac && priv->dma_cap.atime_stamp)
896 		priv->adv_ts = 1;
897 	/* Dwmac 3.x core with extend_desc can support adv_ts */
898 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
899 		priv->adv_ts = 1;
900 
901 	if (priv->dma_cap.time_stamp)
902 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
903 
904 	if (priv->adv_ts)
905 		netdev_info(priv->dev,
906 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
907 
908 	priv->hwts_tx_en = 0;
909 	priv->hwts_rx_en = 0;
910 
911 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
912 		stmmac_hwtstamp_correct_latency(priv, priv);
913 
914 	return 0;
915 }
916 
917 static void stmmac_release_ptp(struct stmmac_priv *priv)
918 {
919 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
920 	stmmac_ptp_unregister(priv);
921 }
922 
923 /**
924  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
925  *  @priv: driver private structure
926  *  @duplex: duplex passed to the next function
927  *  Description: It is used for configuring the flow control in all queues
928  */
929 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
930 {
931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
932 
933 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
934 			priv->pause, tx_cnt);
935 }
936 
937 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
938 					 phy_interface_t interface)
939 {
940 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
941 
942 	/* Refresh the MAC-specific capabilities */
943 	stmmac_mac_update_caps(priv);
944 
945 	config->mac_capabilities = priv->hw->link.caps;
946 
947 	if (priv->plat->max_speed)
948 		phylink_limit_mac_speed(config, priv->plat->max_speed);
949 
950 	return config->mac_capabilities;
951 }
952 
953 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
954 						 phy_interface_t interface)
955 {
956 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
957 	struct phylink_pcs *pcs;
958 
959 	if (priv->plat->select_pcs) {
960 		pcs = priv->plat->select_pcs(priv, interface);
961 		if (!IS_ERR(pcs))
962 			return pcs;
963 	}
964 
965 	return NULL;
966 }
967 
968 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
969 			      const struct phylink_link_state *state)
970 {
971 	/* Nothing to do, xpcs_config() handles everything */
972 }
973 
974 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
975 {
976 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
977 	unsigned long flags;
978 
979 	timer_shutdown_sync(&fpe_cfg->verify_timer);
980 
981 	spin_lock_irqsave(&fpe_cfg->lock, flags);
982 
983 	if (is_up && fpe_cfg->pmac_enabled) {
984 		/* VERIFY process requires pmac enabled when NIC comes up */
985 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
986 				     priv->plat->tx_queues_to_use,
987 				     priv->plat->rx_queues_to_use,
988 				     false, true);
989 
990 		/* New link => maybe new partner => new verification process */
991 		stmmac_fpe_apply(priv);
992 	} else {
993 		/* No link => turn off EFPE */
994 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
995 				     priv->plat->tx_queues_to_use,
996 				     priv->plat->rx_queues_to_use,
997 				     false, false);
998 	}
999 
1000 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
1001 }
1002 
1003 static void stmmac_mac_link_down(struct phylink_config *config,
1004 				 unsigned int mode, phy_interface_t interface)
1005 {
1006 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1007 
1008 	stmmac_mac_set(priv, priv->ioaddr, false);
1009 	priv->eee_active = false;
1010 	priv->tx_lpi_enabled = false;
1011 	priv->eee_enabled = stmmac_eee_init(priv);
1012 	stmmac_set_eee_pls(priv, priv->hw, false);
1013 
1014 	if (priv->dma_cap.fpesel)
1015 		stmmac_fpe_link_state_handle(priv, false);
1016 }
1017 
1018 static void stmmac_mac_link_up(struct phylink_config *config,
1019 			       struct phy_device *phy,
1020 			       unsigned int mode, phy_interface_t interface,
1021 			       int speed, int duplex,
1022 			       bool tx_pause, bool rx_pause)
1023 {
1024 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1025 	u32 old_ctrl, ctrl;
1026 
1027 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1028 	    priv->plat->serdes_powerup)
1029 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1030 
1031 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1032 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1033 
1034 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1035 		switch (speed) {
1036 		case SPEED_10000:
1037 			ctrl |= priv->hw->link.xgmii.speed10000;
1038 			break;
1039 		case SPEED_5000:
1040 			ctrl |= priv->hw->link.xgmii.speed5000;
1041 			break;
1042 		case SPEED_2500:
1043 			ctrl |= priv->hw->link.xgmii.speed2500;
1044 			break;
1045 		default:
1046 			return;
1047 		}
1048 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1049 		switch (speed) {
1050 		case SPEED_100000:
1051 			ctrl |= priv->hw->link.xlgmii.speed100000;
1052 			break;
1053 		case SPEED_50000:
1054 			ctrl |= priv->hw->link.xlgmii.speed50000;
1055 			break;
1056 		case SPEED_40000:
1057 			ctrl |= priv->hw->link.xlgmii.speed40000;
1058 			break;
1059 		case SPEED_25000:
1060 			ctrl |= priv->hw->link.xlgmii.speed25000;
1061 			break;
1062 		case SPEED_10000:
1063 			ctrl |= priv->hw->link.xgmii.speed10000;
1064 			break;
1065 		case SPEED_2500:
1066 			ctrl |= priv->hw->link.speed2500;
1067 			break;
1068 		case SPEED_1000:
1069 			ctrl |= priv->hw->link.speed1000;
1070 			break;
1071 		default:
1072 			return;
1073 		}
1074 	} else {
1075 		switch (speed) {
1076 		case SPEED_2500:
1077 			ctrl |= priv->hw->link.speed2500;
1078 			break;
1079 		case SPEED_1000:
1080 			ctrl |= priv->hw->link.speed1000;
1081 			break;
1082 		case SPEED_100:
1083 			ctrl |= priv->hw->link.speed100;
1084 			break;
1085 		case SPEED_10:
1086 			ctrl |= priv->hw->link.speed10;
1087 			break;
1088 		default:
1089 			return;
1090 		}
1091 	}
1092 
1093 	priv->speed = speed;
1094 
1095 	if (priv->plat->fix_mac_speed)
1096 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1097 
1098 	if (!duplex)
1099 		ctrl &= ~priv->hw->link.duplex;
1100 	else
1101 		ctrl |= priv->hw->link.duplex;
1102 
1103 	/* Flow Control operation */
1104 	if (rx_pause && tx_pause)
1105 		priv->flow_ctrl = FLOW_AUTO;
1106 	else if (rx_pause && !tx_pause)
1107 		priv->flow_ctrl = FLOW_RX;
1108 	else if (!rx_pause && tx_pause)
1109 		priv->flow_ctrl = FLOW_TX;
1110 	else
1111 		priv->flow_ctrl = FLOW_OFF;
1112 
1113 	stmmac_mac_flow_ctrl(priv, duplex);
1114 
1115 	if (ctrl != old_ctrl)
1116 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1117 
1118 	stmmac_mac_set(priv, priv->ioaddr, true);
1119 	if (phy && priv->dma_cap.eee) {
1120 		priv->eee_active =
1121 			phy_init_eee(phy, !(priv->plat->flags &
1122 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1123 		priv->eee_enabled = stmmac_eee_init(priv);
1124 		priv->tx_lpi_enabled = priv->eee_enabled;
1125 		stmmac_set_eee_pls(priv, priv->hw, true);
1126 	}
1127 
1128 	if (priv->dma_cap.fpesel)
1129 		stmmac_fpe_link_state_handle(priv, true);
1130 
1131 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1132 		stmmac_hwtstamp_correct_latency(priv, priv);
1133 }
1134 
1135 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1136 	.mac_get_caps = stmmac_mac_get_caps,
1137 	.mac_select_pcs = stmmac_mac_select_pcs,
1138 	.mac_config = stmmac_mac_config,
1139 	.mac_link_down = stmmac_mac_link_down,
1140 	.mac_link_up = stmmac_mac_link_up,
1141 };
1142 
1143 /**
1144  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1145  * @priv: driver private structure
1146  * Description: this is to verify if the HW supports the PCS.
1147  * The Physical Coding Sublayer (PCS) is an interface that can be used when
1148  * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
1149  */
1150 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1151 {
1152 	int interface = priv->plat->mac_interface;
1153 
1154 	if (priv->dma_cap.pcs) {
1155 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1156 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1157 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1158 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1159 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1160 			priv->hw->pcs = STMMAC_PCS_RGMII;
1161 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1162 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1163 			priv->hw->pcs = STMMAC_PCS_SGMII;
1164 		}
1165 	}
1166 }
1167 
1168 /**
1169  * stmmac_init_phy - PHY initialization
1170  * @dev: net device structure
1171  * Description: it initializes the driver's PHY state, and attaches the PHY
1172  * to the mac driver.
1173  *  Return value:
1174  *  0 on success
1175  */
1176 static int stmmac_init_phy(struct net_device *dev)
1177 {
1178 	struct stmmac_priv *priv = netdev_priv(dev);
1179 	struct fwnode_handle *phy_fwnode;
1180 	struct fwnode_handle *fwnode;
1181 	int ret;
1182 
1183 	if (!phylink_expects_phy(priv->phylink))
1184 		return 0;
1185 
1186 	fwnode = priv->plat->port_node;
1187 	if (!fwnode)
1188 		fwnode = dev_fwnode(priv->device);
1189 
1190 	if (fwnode)
1191 		phy_fwnode = fwnode_get_phy_node(fwnode);
1192 	else
1193 		phy_fwnode = NULL;
1194 
1195 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1196 	 * manually parse it
1197 	 */
1198 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1199 		int addr = priv->plat->phy_addr;
1200 		struct phy_device *phydev;
1201 
1202 		if (addr < 0) {
1203 			netdev_err(priv->dev, "no phy found\n");
1204 			return -ENODEV;
1205 		}
1206 
1207 		phydev = mdiobus_get_phy(priv->mii, addr);
1208 		if (!phydev) {
1209 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1210 			return -ENODEV;
1211 		}
1212 
1213 		if (priv->dma_cap.eee)
1214 			phy_support_eee(phydev);
1215 
1216 		ret = phylink_connect_phy(priv->phylink, phydev);
1217 	} else {
1218 		fwnode_handle_put(phy_fwnode);
1219 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1220 	}
1221 
1222 	if (!priv->plat->pmt) {
1223 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1224 
1225 		phylink_ethtool_get_wol(priv->phylink, &wol);
1226 		device_set_wakeup_capable(priv->device, !!wol.supported);
1227 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1228 	}
1229 
1230 	return ret;
1231 }
1232 
1233 static int stmmac_phy_setup(struct stmmac_priv *priv)
1234 {
1235 	struct stmmac_mdio_bus_data *mdio_bus_data;
1236 	int mode = priv->plat->phy_interface;
1237 	struct fwnode_handle *fwnode;
1238 	struct phylink *phylink;
1239 
1240 	priv->phylink_config.dev = &priv->dev->dev;
1241 	priv->phylink_config.type = PHYLINK_NETDEV;
1242 	priv->phylink_config.mac_managed_pm = true;
1243 
1244 	/* Stmmac always requires an RX clock for hardware initialization */
1245 	priv->phylink_config.mac_requires_rxc = true;
1246 
1247 	mdio_bus_data = priv->plat->mdio_bus_data;
1248 	if (mdio_bus_data)
1249 		priv->phylink_config.default_an_inband =
1250 			mdio_bus_data->default_an_inband;
1251 
1252 	/* Set the platform/firmware specified interface mode. Note, phylink
1253 	 * deals with the PHY interface mode, not the MAC interface mode.
1254 	 */
1255 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1256 
1257 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1258 	if (priv->hw->xpcs)
1259 		xpcs_get_interfaces(priv->hw->xpcs,
1260 				    priv->phylink_config.supported_interfaces);
1261 
1262 	fwnode = priv->plat->port_node;
1263 	if (!fwnode)
1264 		fwnode = dev_fwnode(priv->device);
1265 
1266 	phylink = phylink_create(&priv->phylink_config, fwnode,
1267 				 mode, &stmmac_phylink_mac_ops);
1268 	if (IS_ERR(phylink))
1269 		return PTR_ERR(phylink);
1270 
1271 	priv->phylink = phylink;
1272 	return 0;
1273 }
1274 
1275 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1276 				    struct stmmac_dma_conf *dma_conf)
1277 {
1278 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1279 	unsigned int desc_size;
1280 	void *head_rx;
1281 	u32 queue;
1282 
1283 	/* Display RX rings */
1284 	for (queue = 0; queue < rx_cnt; queue++) {
1285 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1286 
1287 		pr_info("\tRX Queue %u rings\n", queue);
1288 
1289 		if (priv->extend_desc) {
1290 			head_rx = (void *)rx_q->dma_erx;
1291 			desc_size = sizeof(struct dma_extended_desc);
1292 		} else {
1293 			head_rx = (void *)rx_q->dma_rx;
1294 			desc_size = sizeof(struct dma_desc);
1295 		}
1296 
1297 		/* Display RX ring */
1298 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1299 				    rx_q->dma_rx_phy, desc_size);
1300 	}
1301 }
1302 
1303 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1304 				    struct stmmac_dma_conf *dma_conf)
1305 {
1306 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1307 	unsigned int desc_size;
1308 	void *head_tx;
1309 	u32 queue;
1310 
1311 	/* Display TX rings */
1312 	for (queue = 0; queue < tx_cnt; queue++) {
1313 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1314 
1315 		pr_info("\tTX Queue %d rings\n", queue);
1316 
1317 		if (priv->extend_desc) {
1318 			head_tx = (void *)tx_q->dma_etx;
1319 			desc_size = sizeof(struct dma_extended_desc);
1320 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1321 			head_tx = (void *)tx_q->dma_entx;
1322 			desc_size = sizeof(struct dma_edesc);
1323 		} else {
1324 			head_tx = (void *)tx_q->dma_tx;
1325 			desc_size = sizeof(struct dma_desc);
1326 		}
1327 
1328 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1329 				    tx_q->dma_tx_phy, desc_size);
1330 	}
1331 }
1332 
1333 static void stmmac_display_rings(struct stmmac_priv *priv,
1334 				 struct stmmac_dma_conf *dma_conf)
1335 {
1336 	/* Display RX ring */
1337 	stmmac_display_rx_rings(priv, dma_conf);
1338 
1339 	/* Display TX ring */
1340 	stmmac_display_tx_rings(priv, dma_conf);
1341 }
1342 
1343 static int stmmac_set_bfsize(int mtu, int bufsize)
1344 {
1345 	int ret = bufsize;
1346 
1347 	if (mtu >= BUF_SIZE_8KiB)
1348 		ret = BUF_SIZE_16KiB;
1349 	else if (mtu >= BUF_SIZE_4KiB)
1350 		ret = BUF_SIZE_8KiB;
1351 	else if (mtu >= BUF_SIZE_2KiB)
1352 		ret = BUF_SIZE_4KiB;
1353 	else if (mtu > DEFAULT_BUFSIZE)
1354 		ret = BUF_SIZE_2KiB;
1355 	else
1356 		ret = DEFAULT_BUFSIZE;
1357 
1358 	return ret;
1359 }
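
/* Illustrative mapping for the helper above (example MTUs only): an MTU of
 * 1500 keeps DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000 selects
 * BUF_SIZE_4KiB, and an MTU of 9000 selects BUF_SIZE_16KiB.
 */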
1360 
1361 /**
1362  * stmmac_clear_rx_descriptors - clear RX descriptors
1363  * @priv: driver private structure
1364  * @dma_conf: structure to take the dma data
1365  * @queue: RX queue index
1366  * Description: this function is called to clear the RX descriptors
1367  * whether basic or extended descriptors are used.
1368  */
1369 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1370 					struct stmmac_dma_conf *dma_conf,
1371 					u32 queue)
1372 {
1373 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1374 	int i;
1375 
1376 	/* Clear the RX descriptors */
1377 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1378 		if (priv->extend_desc)
1379 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1380 					priv->use_riwt, priv->mode,
1381 					(i == dma_conf->dma_rx_size - 1),
1382 					dma_conf->dma_buf_sz);
1383 		else
1384 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1385 					priv->use_riwt, priv->mode,
1386 					(i == dma_conf->dma_rx_size - 1),
1387 					dma_conf->dma_buf_sz);
1388 }
1389 
1390 /**
1391  * stmmac_clear_tx_descriptors - clear tx descriptors
1392  * @priv: driver private structure
1393  * @dma_conf: structure to take the dma data
1394  * @queue: TX queue index.
1395  * Description: this function is called to clear the TX descriptors
1396  * whether basic or extended descriptors are used.
1397  */
1398 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1399 					struct stmmac_dma_conf *dma_conf,
1400 					u32 queue)
1401 {
1402 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1403 	int i;
1404 
1405 	/* Clear the TX descriptors */
1406 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1407 		int last = (i == (dma_conf->dma_tx_size - 1));
1408 		struct dma_desc *p;
1409 
1410 		if (priv->extend_desc)
1411 			p = &tx_q->dma_etx[i].basic;
1412 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1413 			p = &tx_q->dma_entx[i].basic;
1414 		else
1415 			p = &tx_q->dma_tx[i];
1416 
1417 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1418 	}
1419 }
1420 
1421 /**
1422  * stmmac_clear_descriptors - clear descriptors
1423  * @priv: driver private structure
1424  * @dma_conf: structure to take the dma data
1425  * Description: this function is called to clear the TX and RX descriptors
1426  * whether basic or extended descriptors are used.
1427  */
1428 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1429 				     struct stmmac_dma_conf *dma_conf)
1430 {
1431 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1432 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1433 	u32 queue;
1434 
1435 	/* Clear the RX descriptors */
1436 	for (queue = 0; queue < rx_queue_cnt; queue++)
1437 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1438 
1439 	/* Clear the TX descriptors */
1440 	for (queue = 0; queue < tx_queue_cnt; queue++)
1441 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1442 }
1443 
1444 /**
1445  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1446  * @priv: driver private structure
1447  * @dma_conf: structure to take the dma data
1448  * @p: descriptor pointer
1449  * @i: descriptor index
1450  * @flags: gfp flag
1451  * @queue: RX queue index
1452  * Description: this function is called to allocate a receive buffer, perform
1453  * the DMA mapping and init the descriptor.
1454  */
1455 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1456 				  struct stmmac_dma_conf *dma_conf,
1457 				  struct dma_desc *p,
1458 				  int i, gfp_t flags, u32 queue)
1459 {
1460 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1461 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1462 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1463 
1464 	if (priv->dma_cap.host_dma_width <= 32)
1465 		gfp |= GFP_DMA32;
1466 
1467 	if (!buf->page) {
1468 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1469 		if (!buf->page)
1470 			return -ENOMEM;
1471 		buf->page_offset = stmmac_rx_offset(priv);
1472 	}
1473 
1474 	if (priv->sph && !buf->sec_page) {
1475 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1476 		if (!buf->sec_page)
1477 			return -ENOMEM;
1478 
1479 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1480 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1481 	} else {
1482 		buf->sec_page = NULL;
1483 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1484 	}
1485 
1486 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1487 
1488 	stmmac_set_desc_addr(priv, p, buf->addr);
1489 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1490 		stmmac_init_desc3(priv, p);
1491 
1492 	return 0;
1493 }
1494 
1495 /**
1496  * stmmac_free_rx_buffer - free RX dma buffers
1497  * @priv: private structure
1498  * @rx_q: RX queue
1499  * @i: buffer index.
1500  */
1501 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1502 				  struct stmmac_rx_queue *rx_q,
1503 				  int i)
1504 {
1505 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1506 
1507 	if (buf->page)
1508 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1509 	buf->page = NULL;
1510 
1511 	if (buf->sec_page)
1512 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1513 	buf->sec_page = NULL;
1514 }
1515 
1516 /**
1517  * stmmac_free_tx_buffer - free TX dma buffers
1518  * @priv: private structure
1519  * @dma_conf: structure to take the dma data
1520  * @queue: TX queue index
1521  * @i: buffer index.
1522  */
1523 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1524 				  struct stmmac_dma_conf *dma_conf,
1525 				  u32 queue, int i)
1526 {
1527 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1528 
1529 	if (tx_q->tx_skbuff_dma[i].buf &&
1530 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1531 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1532 			dma_unmap_page(priv->device,
1533 				       tx_q->tx_skbuff_dma[i].buf,
1534 				       tx_q->tx_skbuff_dma[i].len,
1535 				       DMA_TO_DEVICE);
1536 		else
1537 			dma_unmap_single(priv->device,
1538 					 tx_q->tx_skbuff_dma[i].buf,
1539 					 tx_q->tx_skbuff_dma[i].len,
1540 					 DMA_TO_DEVICE);
1541 	}
1542 
1543 	if (tx_q->xdpf[i] &&
1544 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1545 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1546 		xdp_return_frame(tx_q->xdpf[i]);
1547 		tx_q->xdpf[i] = NULL;
1548 	}
1549 
1550 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1551 		tx_q->xsk_frames_done++;
1552 
1553 	if (tx_q->tx_skbuff[i] &&
1554 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1555 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1556 		tx_q->tx_skbuff[i] = NULL;
1557 	}
1558 
1559 	tx_q->tx_skbuff_dma[i].buf = 0;
1560 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1561 }
1562 
1563 /**
1564  * dma_free_rx_skbufs - free RX dma buffers
1565  * @priv: private structure
1566  * @dma_conf: structure to take the dma data
1567  * @queue: RX queue index
1568  */
1569 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1570 			       struct stmmac_dma_conf *dma_conf,
1571 			       u32 queue)
1572 {
1573 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 	int i;
1575 
1576 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1577 		stmmac_free_rx_buffer(priv, rx_q, i);
1578 }
1579 
1580 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1581 				   struct stmmac_dma_conf *dma_conf,
1582 				   u32 queue, gfp_t flags)
1583 {
1584 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1585 	int i;
1586 
1587 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588 		struct dma_desc *p;
1589 		int ret;
1590 
1591 		if (priv->extend_desc)
1592 			p = &((rx_q->dma_erx + i)->basic);
1593 		else
1594 			p = rx_q->dma_rx + i;
1595 
1596 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1597 					     queue);
1598 		if (ret)
1599 			return ret;
1600 
1601 		rx_q->buf_alloc_num++;
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 /**
1608  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1609  * @priv: private structure
1610  * @dma_conf: structure to take the dma data
1611  * @queue: RX queue index
1612  */
1613 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1614 				struct stmmac_dma_conf *dma_conf,
1615 				u32 queue)
1616 {
1617 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1618 	int i;
1619 
1620 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1621 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1622 
1623 		if (!buf->xdp)
1624 			continue;
1625 
1626 		xsk_buff_free(buf->xdp);
1627 		buf->xdp = NULL;
1628 	}
1629 }
1630 
1631 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1632 				      struct stmmac_dma_conf *dma_conf,
1633 				      u32 queue)
1634 {
1635 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1636 	int i;
1637 
1638	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1639	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1640	 * use this macro to make sure there are no size violations.
1641	 */
1642 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1643 
1644 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1645 		struct stmmac_rx_buffer *buf;
1646 		dma_addr_t dma_addr;
1647 		struct dma_desc *p;
1648 
1649 		if (priv->extend_desc)
1650 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1651 		else
1652 			p = rx_q->dma_rx + i;
1653 
1654 		buf = &rx_q->buf_pool[i];
1655 
1656 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1657 		if (!buf->xdp)
1658 			return -ENOMEM;
1659 
1660 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1661 		stmmac_set_desc_addr(priv, p, dma_addr);
1662 		rx_q->buf_alloc_num++;
1663 	}
1664 
1665 	return 0;
1666 }
1667 
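/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: return the XSK buffer pool registered for this queue, or
 * NULL when XDP is disabled or no zero-copy socket is bound to the queue.
 */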
1668 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1669 {
1670 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1671 		return NULL;
1672 
1673 	return xsk_get_pool_from_qid(priv->dev, queue);
1674 }
1675 
1676 /**
1677  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1678  * @priv: driver private structure
1679  * @dma_conf: structure to take the dma data
1680  * @queue: RX queue index
1681  * @flags: gfp flag.
1682  * Description: this function initializes the DMA RX descriptors
1683  * and allocates the socket buffers. It supports the chained and ring
1684  * modes.
1685  */
1686 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1687 				    struct stmmac_dma_conf *dma_conf,
1688 				    u32 queue, gfp_t flags)
1689 {
1690 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1691 	int ret;
1692 
1693 	netif_dbg(priv, probe, priv->dev,
1694 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1695 		  (u32)rx_q->dma_rx_phy);
1696 
1697 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1698 
1699 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1700 
1701 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1702 
1703 	if (rx_q->xsk_pool) {
1704 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1705 						   MEM_TYPE_XSK_BUFF_POOL,
1706 						   NULL));
1707 		netdev_info(priv->dev,
1708 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1709 			    rx_q->queue_index);
1710 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1711 	} else {
1712 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1713 						   MEM_TYPE_PAGE_POOL,
1714 						   rx_q->page_pool));
1715 		netdev_info(priv->dev,
1716 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1717 			    rx_q->queue_index);
1718 	}
1719 
1720 	if (rx_q->xsk_pool) {
1721 		/* RX XDP ZC buffer pool may not be populated, e.g.
1722 		 * xdpsock TX-only.
1723 		 */
1724 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1725 	} else {
1726 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1727 		if (ret < 0)
1728 			return -ENOMEM;
1729 	}
1730 
1731 	/* Setup the chained descriptor addresses */
1732 	if (priv->mode == STMMAC_CHAIN_MODE) {
1733 		if (priv->extend_desc)
1734 			stmmac_mode_init(priv, rx_q->dma_erx,
1735 					 rx_q->dma_rx_phy,
1736 					 dma_conf->dma_rx_size, 1);
1737 		else
1738 			stmmac_mode_init(priv, rx_q->dma_rx,
1739 					 rx_q->dma_rx_phy,
1740 					 dma_conf->dma_rx_size, 0);
1741 	}
1742 
1743 	return 0;
1744 }
1745 
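/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize the RX descriptor ring of every queue in use; on
 * failure, free the buffers already allocated for the previous queues.
 */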
1746 static int init_dma_rx_desc_rings(struct net_device *dev,
1747 				  struct stmmac_dma_conf *dma_conf,
1748 				  gfp_t flags)
1749 {
1750 	struct stmmac_priv *priv = netdev_priv(dev);
1751 	u32 rx_count = priv->plat->rx_queues_to_use;
1752 	int queue;
1753 	int ret;
1754 
1755 	/* RX INITIALIZATION */
1756 	netif_dbg(priv, probe, priv->dev,
1757 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1758 
1759 	for (queue = 0; queue < rx_count; queue++) {
1760 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1761 		if (ret)
1762 			goto err_init_rx_buffers;
1763 	}
1764 
1765 	return 0;
1766 
1767 err_init_rx_buffers:
1768 	while (queue >= 0) {
1769 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1770 
1771 		if (rx_q->xsk_pool)
1772 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1773 		else
1774 			dma_free_rx_skbufs(priv, dma_conf, queue);
1775 
1776 		rx_q->buf_alloc_num = 0;
1777 		rx_q->xsk_pool = NULL;
1778 
1779 		queue--;
1780 	}
1781 
1782 	return ret;
1783 }
1784 
1785 /**
1786  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1787  * @priv: driver private structure
1788  * @dma_conf: structure to take the dma data
1789  * @queue: TX queue index
1790  * Description: this function initializes the DMA TX descriptors
1791  * and allocates the socket buffers. It supports the chained and ring
1792  * modes.
1793  */
1794 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1795 				    struct stmmac_dma_conf *dma_conf,
1796 				    u32 queue)
1797 {
1798 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1799 	int i;
1800 
1801 	netif_dbg(priv, probe, priv->dev,
1802 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1803 		  (u32)tx_q->dma_tx_phy);
1804 
1805 	/* Setup the chained descriptor addresses */
1806 	if (priv->mode == STMMAC_CHAIN_MODE) {
1807 		if (priv->extend_desc)
1808 			stmmac_mode_init(priv, tx_q->dma_etx,
1809 					 tx_q->dma_tx_phy,
1810 					 dma_conf->dma_tx_size, 1);
1811 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1812 			stmmac_mode_init(priv, tx_q->dma_tx,
1813 					 tx_q->dma_tx_phy,
1814 					 dma_conf->dma_tx_size, 0);
1815 	}
1816 
1817 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1818 
1819 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1820 		struct dma_desc *p;
1821 
1822 		if (priv->extend_desc)
1823 			p = &((tx_q->dma_etx + i)->basic);
1824 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1825 			p = &((tx_q->dma_entx + i)->basic);
1826 		else
1827 			p = tx_q->dma_tx + i;
1828 
1829 		stmmac_clear_desc(priv, p);
1830 
1831 		tx_q->tx_skbuff_dma[i].buf = 0;
1832 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1833 		tx_q->tx_skbuff_dma[i].len = 0;
1834 		tx_q->tx_skbuff_dma[i].last_segment = false;
1835 		tx_q->tx_skbuff[i] = NULL;
1836 	}
1837 
1838 	return 0;
1839 }
1840 
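/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptor ring of every queue in use.
 */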
1841 static int init_dma_tx_desc_rings(struct net_device *dev,
1842 				  struct stmmac_dma_conf *dma_conf)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	u32 tx_queue_cnt;
1846 	u32 queue;
1847 
1848 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1849 
1850 	for (queue = 0; queue < tx_queue_cnt; queue++)
1851 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1852 
1853 	return 0;
1854 }
1855 
1856 /**
1857  * init_dma_desc_rings - init the RX/TX descriptor rings
1858  * @dev: net device structure
1859  * @dma_conf: structure to take the dma data
1860  * @flags: gfp flag.
1861  * Description: this function initializes the DMA RX/TX descriptors
1862  * and allocates the socket buffers. It supports the chained and ring
1863  * modes.
1864  */
1865 static int init_dma_desc_rings(struct net_device *dev,
1866 			       struct stmmac_dma_conf *dma_conf,
1867 			       gfp_t flags)
1868 {
1869 	struct stmmac_priv *priv = netdev_priv(dev);
1870 	int ret;
1871 
1872 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1873 	if (ret)
1874 		return ret;
1875 
1876 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1877 
1878 	stmmac_clear_descriptors(priv, dma_conf);
1879 
1880 	if (netif_msg_hw(priv))
1881 		stmmac_display_rings(priv, dma_conf);
1882 
1883 	return ret;
1884 }
1885 
1886 /**
1887  * dma_free_tx_skbufs - free TX dma buffers
1888  * @priv: private structure
1889  * @dma_conf: structure to take the dma data
1890  * @queue: TX queue index
1891  */
1892 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1893 			       struct stmmac_dma_conf *dma_conf,
1894 			       u32 queue)
1895 {
1896 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1897 	int i;
1898 
1899 	tx_q->xsk_frames_done = 0;
1900 
1901 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1902 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1903 
1904 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1905 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1906 		tx_q->xsk_frames_done = 0;
1907 		tx_q->xsk_pool = NULL;
1908 	}
1909 }
1910 
1911 /**
1912  * stmmac_free_tx_skbufs - free TX skb buffers
1913  * @priv: private structure
1914  */
1915 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1916 {
1917 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1918 	u32 queue;
1919 
1920 	for (queue = 0; queue < tx_queue_cnt; queue++)
1921 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1922 }
1923 
1924 /**
1925  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1926  * @priv: private structure
1927  * @dma_conf: structure to take the dma data
1928  * @queue: RX queue index
1929  */
1930 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1931 					 struct stmmac_dma_conf *dma_conf,
1932 					 u32 queue)
1933 {
1934 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1935 
1936 	/* Release the DMA RX socket buffers */
1937 	if (rx_q->xsk_pool)
1938 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1939 	else
1940 		dma_free_rx_skbufs(priv, dma_conf, queue);
1941 
1942 	rx_q->buf_alloc_num = 0;
1943 	rx_q->xsk_pool = NULL;
1944 
1945 	/* Free DMA regions of consistent memory previously allocated */
1946 	if (!priv->extend_desc)
1947 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1948 				  sizeof(struct dma_desc),
1949 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1950 	else
1951 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1952 				  sizeof(struct dma_extended_desc),
1953 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1954 
1955 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1956 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1957 
1958 	kfree(rx_q->buf_pool);
1959 	if (rx_q->page_pool)
1960 		page_pool_destroy(rx_q->page_pool);
1961 }
1962 
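/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: release the RX descriptor resources of every queue in use.
 */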
1963 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1964 				       struct stmmac_dma_conf *dma_conf)
1965 {
1966 	u32 rx_count = priv->plat->rx_queues_to_use;
1967 	u32 queue;
1968 
1969 	/* Free RX queue resources */
1970 	for (queue = 0; queue < rx_count; queue++)
1971 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1972 }
1973 
1974 /**
1975  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1976  * @priv: private structure
1977  * @dma_conf: structure to take the dma data
1978  * @queue: TX queue index
1979  */
1980 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1981 					 struct stmmac_dma_conf *dma_conf,
1982 					 u32 queue)
1983 {
1984 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1985 	size_t size;
1986 	void *addr;
1987 
1988 	/* Release the DMA TX socket buffers */
1989 	dma_free_tx_skbufs(priv, dma_conf, queue);
1990 
1991 	if (priv->extend_desc) {
1992 		size = sizeof(struct dma_extended_desc);
1993 		addr = tx_q->dma_etx;
1994 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1995 		size = sizeof(struct dma_edesc);
1996 		addr = tx_q->dma_entx;
1997 	} else {
1998 		size = sizeof(struct dma_desc);
1999 		addr = tx_q->dma_tx;
2000 	}
2001 
2002 	size *= dma_conf->dma_tx_size;
2003 
2004 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2005 
2006 	kfree(tx_q->tx_skbuff_dma);
2007 	kfree(tx_q->tx_skbuff);
2008 }
2009 
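/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: release the TX descriptor resources of every queue in use.
 */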
2010 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2011 				       struct stmmac_dma_conf *dma_conf)
2012 {
2013 	u32 tx_count = priv->plat->tx_queues_to_use;
2014 	u32 queue;
2015 
2016 	/* Free TX queue resources */
2017 	for (queue = 0; queue < tx_count; queue++)
2018 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2019 }
2020 
2021 /**
2022  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2023  * @priv: private structure
2024  * @dma_conf: structure to take the dma data
2025  * @queue: RX queue index
2026  * Description: according to which descriptor can be used (extended or basic),
2027  * this function allocates the RX resources for the given queue: the page
2028  * pool, the buffer pool array, the descriptor ring in coherent memory and
2029  * the XDP RX queue info.
2030  */
2031 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2032 					 struct stmmac_dma_conf *dma_conf,
2033 					 u32 queue)
2034 {
2035 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2036 	struct stmmac_channel *ch = &priv->channel[queue];
2037 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2038 	struct page_pool_params pp_params = { 0 };
2039 	unsigned int num_pages;
2040 	unsigned int napi_id;
2041 	int ret;
2042 
2043 	rx_q->queue_index = queue;
2044 	rx_q->priv_data = priv;
2045 
2046 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2047 	pp_params.pool_size = dma_conf->dma_rx_size;
2048 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2049 	pp_params.order = ilog2(num_pages);
2050 	pp_params.nid = dev_to_node(priv->device);
2051 	pp_params.dev = priv->device;
2052 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2053 	pp_params.offset = stmmac_rx_offset(priv);
2054 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2055 
2056 	rx_q->page_pool = page_pool_create(&pp_params);
2057 	if (IS_ERR(rx_q->page_pool)) {
2058 		ret = PTR_ERR(rx_q->page_pool);
2059 		rx_q->page_pool = NULL;
2060 		return ret;
2061 	}
2062 
2063 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2064 				 sizeof(*rx_q->buf_pool),
2065 				 GFP_KERNEL);
2066 	if (!rx_q->buf_pool)
2067 		return -ENOMEM;
2068 
2069 	if (priv->extend_desc) {
2070 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2071 						   dma_conf->dma_rx_size *
2072 						   sizeof(struct dma_extended_desc),
2073 						   &rx_q->dma_rx_phy,
2074 						   GFP_KERNEL);
2075 		if (!rx_q->dma_erx)
2076 			return -ENOMEM;
2077 
2078 	} else {
2079 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2080 						  dma_conf->dma_rx_size *
2081 						  sizeof(struct dma_desc),
2082 						  &rx_q->dma_rx_phy,
2083 						  GFP_KERNEL);
2084 		if (!rx_q->dma_rx)
2085 			return -ENOMEM;
2086 	}
2087 
2088 	if (stmmac_xdp_is_enabled(priv) &&
2089 	    test_bit(queue, priv->af_xdp_zc_qps))
2090 		napi_id = ch->rxtx_napi.napi_id;
2091 	else
2092 		napi_id = ch->rx_napi.napi_id;
2093 
2094 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2095 			       rx_q->queue_index,
2096 			       napi_id);
2097 	if (ret) {
2098 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2099 		return -EINVAL;
2100 	}
2101 
2102 	return 0;
2103 }
2104 
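/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the RX resources of every queue in use; on failure,
 * release whatever has already been allocated.
 */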
2105 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2106 				       struct stmmac_dma_conf *dma_conf)
2107 {
2108 	u32 rx_count = priv->plat->rx_queues_to_use;
2109 	u32 queue;
2110 	int ret;
2111 
2112 	/* RX queues buffers and DMA */
2113 	for (queue = 0; queue < rx_count; queue++) {
2114 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2115 		if (ret)
2116 			goto err_dma;
2117 	}
2118 
2119 	return 0;
2120 
2121 err_dma:
2122 	free_dma_rx_desc_resources(priv, dma_conf);
2123 
2124 	return ret;
2125 }
2126 
2127 /**
2128  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2129  * @priv: private structure
2130  * @dma_conf: structure to take the dma data
2131  * @queue: TX queue index
2132  * Description: according to which descriptor can be used (extended or basic),
2133  * this function allocates the TX resources for the given queue: the
2134  * tx_skbuff and tx_skbuff_dma arrays and the descriptor ring in coherent
2135  * memory.
2136  */
2137 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2138 					 struct stmmac_dma_conf *dma_conf,
2139 					 u32 queue)
2140 {
2141 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2142 	size_t size;
2143 	void *addr;
2144 
2145 	tx_q->queue_index = queue;
2146 	tx_q->priv_data = priv;
2147 
2148 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2149 				      sizeof(*tx_q->tx_skbuff_dma),
2150 				      GFP_KERNEL);
2151 	if (!tx_q->tx_skbuff_dma)
2152 		return -ENOMEM;
2153 
2154 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2155 				  sizeof(struct sk_buff *),
2156 				  GFP_KERNEL);
2157 	if (!tx_q->tx_skbuff)
2158 		return -ENOMEM;
2159 
2160 	if (priv->extend_desc)
2161 		size = sizeof(struct dma_extended_desc);
2162 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2163 		size = sizeof(struct dma_edesc);
2164 	else
2165 		size = sizeof(struct dma_desc);
2166 
2167 	size *= dma_conf->dma_tx_size;
2168 
2169 	addr = dma_alloc_coherent(priv->device, size,
2170 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2171 	if (!addr)
2172 		return -ENOMEM;
2173 
2174 	if (priv->extend_desc)
2175 		tx_q->dma_etx = addr;
2176 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2177 		tx_q->dma_entx = addr;
2178 	else
2179 		tx_q->dma_tx = addr;
2180 
2181 	return 0;
2182 }
2183 
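/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the TX resources of every queue in use; on failure,
 * release whatever has already been allocated.
 */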
2184 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2185 				       struct stmmac_dma_conf *dma_conf)
2186 {
2187 	u32 tx_count = priv->plat->tx_queues_to_use;
2188 	u32 queue;
2189 	int ret;
2190 
2191 	/* TX queues buffers and DMA */
2192 	for (queue = 0; queue < tx_count; queue++) {
2193 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2194 		if (ret)
2195 			goto err_dma;
2196 	}
2197 
2198 	return 0;
2199 
2200 err_dma:
2201 	free_dma_tx_desc_resources(priv, dma_conf);
2202 	return ret;
2203 }
2204 
2205 /**
2206  * alloc_dma_desc_resources - alloc TX/RX resources.
2207  * @priv: private structure
2208  * @dma_conf: structure to take the dma data
2209  * Description: according to which descriptor can be used (extended or basic),
2210  * this function allocates the resources for the TX and RX paths. In case of
2211  * reception, for example, it pre-allocates the RX socket buffers in order to
2212  * allow the zero-copy mechanism.
2213  */
2214 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2215 				    struct stmmac_dma_conf *dma_conf)
2216 {
2217 	/* RX Allocation */
2218 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2219 
2220 	if (ret)
2221 		return ret;
2222 
2223 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2224 
2225 	return ret;
2226 }
2227 
2228 /**
2229  * free_dma_desc_resources - free dma desc resources
2230  * @priv: private structure
2231  * @dma_conf: structure to take the dma data
2232  */
2233 static void free_dma_desc_resources(struct stmmac_priv *priv,
2234 				    struct stmmac_dma_conf *dma_conf)
2235 {
2236 	/* Release the DMA TX socket buffers */
2237 	free_dma_tx_desc_resources(priv, dma_conf);
2238 
2239 	/* Release the DMA RX socket buffers later
2240 	 * to ensure all pending XDP_TX buffers are returned.
2241 	 */
2242 	free_dma_rx_desc_resources(priv, dma_conf);
2243 }
2244 
2245 /**
2246  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2247  *  @priv: driver private structure
2248  *  Description: It is used for enabling the rx queues in the MAC
2249  */
2250 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2251 {
2252 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2253 	int queue;
2254 	u8 mode;
2255 
2256 	for (queue = 0; queue < rx_queues_count; queue++) {
2257 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2258 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2259 	}
2260 }
2261 
2262 /**
2263  * stmmac_start_rx_dma - start RX DMA channel
2264  * @priv: driver private structure
2265  * @chan: RX channel index
2266  * Description:
2267  * This starts a RX DMA channel
2268  */
2269 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2270 {
2271 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2272 	stmmac_start_rx(priv, priv->ioaddr, chan);
2273 }
2274 
2275 /**
2276  * stmmac_start_tx_dma - start TX DMA channel
2277  * @priv: driver private structure
2278  * @chan: TX channel index
2279  * Description:
2280  * This starts a TX DMA channel
2281  */
2282 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2283 {
2284 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2285 	stmmac_start_tx(priv, priv->ioaddr, chan);
2286 }
2287 
2288 /**
2289  * stmmac_stop_rx_dma - stop RX DMA channel
2290  * @priv: driver private structure
2291  * @chan: RX channel index
2292  * Description:
2293  * This stops a RX DMA channel
2294  */
2295 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2296 {
2297 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2298 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2299 }
2300 
2301 /**
2302  * stmmac_stop_tx_dma - stop TX DMA channel
2303  * @priv: driver private structure
2304  * @chan: TX channel index
2305  * Description:
2306  * This stops a TX DMA channel
2307  */
2308 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2309 {
2310 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2311 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2312 }
2313 
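/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: enable the DMA interrupts of every channel, taking the
 * per-channel lock so it does not race with the NAPI handlers.
 */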
2314 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2315 {
2316 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2317 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2318 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2319 	u32 chan;
2320 
2321 	for (chan = 0; chan < dma_csr_ch; chan++) {
2322 		struct stmmac_channel *ch = &priv->channel[chan];
2323 		unsigned long flags;
2324 
2325 		spin_lock_irqsave(&ch->lock, flags);
2326 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2327 		spin_unlock_irqrestore(&ch->lock, flags);
2328 	}
2329 }
2330 
2331 /**
2332  * stmmac_start_all_dma - start all RX and TX DMA channels
2333  * @priv: driver private structure
2334  * Description:
2335  * This starts all the RX and TX DMA channels
2336  */
2337 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2340 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2341 	u32 chan = 0;
2342 
2343 	for (chan = 0; chan < rx_channels_count; chan++)
2344 		stmmac_start_rx_dma(priv, chan);
2345 
2346 	for (chan = 0; chan < tx_channels_count; chan++)
2347 		stmmac_start_tx_dma(priv, chan);
2348 }
2349 
2350 /**
2351  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2352  * @priv: driver private structure
2353  * Description:
2354  * This stops the RX and TX DMA channels
2355  */
2356 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2357 {
2358 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2359 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2360 	u32 chan = 0;
2361 
2362 	for (chan = 0; chan < rx_channels_count; chan++)
2363 		stmmac_stop_rx_dma(priv, chan);
2364 
2365 	for (chan = 0; chan < tx_channels_count; chan++)
2366 		stmmac_stop_tx_dma(priv, chan);
2367 }
2368 
2369 /**
2370  *  stmmac_dma_operation_mode - HW DMA operation mode
2371  *  @priv: driver private structure
2372  *  Description: it is used for configuring the DMA operation mode register in
2373  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2374  */
2375 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2376 {
2377 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2378 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2379 	int rxfifosz = priv->plat->rx_fifo_size;
2380 	int txfifosz = priv->plat->tx_fifo_size;
2381 	u32 txmode = 0;
2382 	u32 rxmode = 0;
2383 	u32 chan = 0;
2384 	u8 qmode = 0;
2385 
2386 	if (rxfifosz == 0)
2387 		rxfifosz = priv->dma_cap.rx_fifo_size;
2388 	if (txfifosz == 0)
2389 		txfifosz = priv->dma_cap.tx_fifo_size;
2390 
2391 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2392 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2393 		rxfifosz /= rx_channels_count;
2394 		txfifosz /= tx_channels_count;
2395 	}
2396 
2397 	if (priv->plat->force_thresh_dma_mode) {
2398 		txmode = tc;
2399 		rxmode = tc;
2400 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2401		/*
2402		 * In case of GMAC, SF mode can be enabled
2403		 * to perform the TX COE in HW. This depends on:
2404		 * 1) TX COE being actually supported
2405		 * 2) there being no buggy Jumbo frame support
2406		 *    that requires not inserting the csum in the TDES.
2407		 */
2408 		txmode = SF_DMA_MODE;
2409 		rxmode = SF_DMA_MODE;
2410 		priv->xstats.threshold = SF_DMA_MODE;
2411 	} else {
2412 		txmode = tc;
2413 		rxmode = SF_DMA_MODE;
2414 	}
2415 
2416 	/* configure all channels */
2417 	for (chan = 0; chan < rx_channels_count; chan++) {
2418 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2419 		u32 buf_size;
2420 
2421 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2422 
2423 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2424 				rxfifosz, qmode);
2425 
2426 		if (rx_q->xsk_pool) {
2427 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2428 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2429 					      buf_size,
2430 					      chan);
2431 		} else {
2432 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2433 					      priv->dma_conf.dma_buf_sz,
2434 					      chan);
2435 		}
2436 	}
2437 
2438 	for (chan = 0; chan < tx_channels_count; chan++) {
2439 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2440 
2441 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2442 				txfifosz, qmode);
2443 	}
2444 }
2445 
2446 static void stmmac_xsk_request_timestamp(void *_priv)
2447 {
2448 	struct stmmac_metadata_request *meta_req = _priv;
2449 
2450 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2451 	*meta_req->set_ic = true;
2452 }
2453 
2454 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2455 {
2456 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2457 	struct stmmac_priv *priv = tx_compl->priv;
2458 	struct dma_desc *desc = tx_compl->desc;
2459 	bool found = false;
2460 	u64 ns = 0;
2461 
2462 	if (!priv->hwts_tx_en)
2463 		return 0;
2464 
2465 	/* check tx tstamp status */
2466 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2467 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2468 		found = true;
2469 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2470 		found = true;
2471 	}
2472 
2473 	if (found) {
2474 		ns -= priv->plat->cdc_error_adj;
2475 		return ns_to_ktime(ns);
2476 	}
2477 
2478 	return 0;
2479 }
2480 
2481 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2482 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2483 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2484 };
2485 
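/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peek descriptors from the XSK pool and map them directly to
 * DMA TX descriptors. Return true when the budget is not exhausted and no
 * XSK descriptors are pending, so the caller can re-enable the TX IRQ.
 */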
2486 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2487 {
2488 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2489 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2490 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2491 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2492 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2493 	unsigned int entry = tx_q->cur_tx;
2494 	struct dma_desc *tx_desc = NULL;
2495 	struct xdp_desc xdp_desc;
2496 	bool work_done = true;
2497 	u32 tx_set_ic_bit = 0;
2498 
2499 	/* Avoids TX time-out as we are sharing with slow path */
2500 	txq_trans_cond_update(nq);
2501 
2502 	budget = min(budget, stmmac_tx_avail(priv, queue));
2503 
2504 	for (; budget > 0; budget--) {
2505 		struct stmmac_metadata_request meta_req;
2506 		struct xsk_tx_metadata *meta = NULL;
2507 		dma_addr_t dma_addr;
2508 		bool set_ic;
2509 
2510		/* We are sharing with the slow path, so stop XSK TX desc submission
2511		 * when the available TX ring space is below the threshold.
2512		 */
2513 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2514 		    !netif_carrier_ok(priv->dev)) {
2515 			work_done = false;
2516 			break;
2517 		}
2518 
2519 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2520 			break;
2521 
2522 		if (priv->est && priv->est->enable &&
2523 		    priv->est->max_sdu[queue] &&
2524 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2525 			priv->xstats.max_sdu_txq_drop[queue]++;
2526 			continue;
2527 		}
2528 
2529 		if (likely(priv->extend_desc))
2530 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2531 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2532 			tx_desc = &tx_q->dma_entx[entry].basic;
2533 		else
2534 			tx_desc = tx_q->dma_tx + entry;
2535 
2536 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2537 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2538 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2539 
2540 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2541 
2542		/* To return the XDP buffer to the XSK pool, we simply call
2543 		 * xsk_tx_completed(), so we don't need to fill up
2544 		 * 'buf' and 'xdpf'.
2545 		 */
2546 		tx_q->tx_skbuff_dma[entry].buf = 0;
2547 		tx_q->xdpf[entry] = NULL;
2548 
2549 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2550 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2551 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2552 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2553 
2554 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2555 
2556 		tx_q->tx_count_frames++;
2557 
2558 		if (!priv->tx_coal_frames[queue])
2559 			set_ic = false;
2560 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2561 			set_ic = true;
2562 		else
2563 			set_ic = false;
2564 
2565 		meta_req.priv = priv;
2566 		meta_req.tx_desc = tx_desc;
2567 		meta_req.set_ic = &set_ic;
2568 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2569 					&meta_req);
2570 		if (set_ic) {
2571 			tx_q->tx_count_frames = 0;
2572 			stmmac_set_tx_ic(priv, tx_desc);
2573 			tx_set_ic_bit++;
2574 		}
2575 
2576 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2577 				       csum, priv->mode, true, true,
2578 				       xdp_desc.len);
2579 
2580 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2581 
2582 		xsk_tx_metadata_to_compl(meta,
2583 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2584 
2585 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2586 		entry = tx_q->cur_tx;
2587 	}
2588 	u64_stats_update_begin(&txq_stats->napi_syncp);
2589 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2590 	u64_stats_update_end(&txq_stats->napi_syncp);
2591 
2592 	if (tx_desc) {
2593 		stmmac_flush_tx_descriptors(priv, queue);
2594 		xsk_tx_release(pool);
2595 	}
2596 
2597	/* Return true if both of the following conditions are met:
2598	 *  a) TX budget is still available
2599	 *  b) work_done = true when the XSK TX desc peek is empty (no more
2600	 *     pending XSK TX for transmission)
2601	 */
2602 	return !!budget && work_done;
2603 }
2604 
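/**
 * stmmac_bump_dma_threshold - raise the DMA threshold after a TX failure
 * @priv: driver private structure
 * @chan: channel index
 * Description: when operating in threshold (non Store-And-Forward) mode,
 * bump the threshold by 64 (up to 256) to reduce further TX underflows.
 */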
2605 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2606 {
2607 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2608 		tc += 64;
2609 
2610 		if (priv->plat->force_thresh_dma_mode)
2611 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2612 		else
2613 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2614 						      chan);
2615 
2616 		priv->xstats.threshold = tc;
2617 	}
2618 }
2619 
2620 /**
2621  * stmmac_tx_clean - to manage the transmission completion
2622  * @priv: driver private structure
2623  * @budget: napi budget limiting this functions packet handling
2624  * @queue: TX queue index
2625  * @pending_packets: signal to arm the TX coal timer
2626  * Description: it reclaims the transmit resources after transmission completes.
2627  * If some packets still need to be handled, due to TX coalesce, set
2628  * pending_packets to true to make NAPI arm the TX coal timer.
2629  */
2630 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2631 			   bool *pending_packets)
2632 {
2633 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2634 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2635 	unsigned int bytes_compl = 0, pkts_compl = 0;
2636 	unsigned int entry, xmits = 0, count = 0;
2637 	u32 tx_packets = 0, tx_errors = 0;
2638 
2639 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2640 
2641 	tx_q->xsk_frames_done = 0;
2642 
2643 	entry = tx_q->dirty_tx;
2644 
2645	/* Try to clean all TX complete frames in 1 shot */
2646 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2647 		struct xdp_frame *xdpf;
2648 		struct sk_buff *skb;
2649 		struct dma_desc *p;
2650 		int status;
2651 
2652 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2653 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2654 			xdpf = tx_q->xdpf[entry];
2655 			skb = NULL;
2656 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2657 			xdpf = NULL;
2658 			skb = tx_q->tx_skbuff[entry];
2659 		} else {
2660 			xdpf = NULL;
2661 			skb = NULL;
2662 		}
2663 
2664 		if (priv->extend_desc)
2665 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2666 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2667 			p = &tx_q->dma_entx[entry].basic;
2668 		else
2669 			p = tx_q->dma_tx + entry;
2670 
2671 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2672 		/* Check if the descriptor is owned by the DMA */
2673 		if (unlikely(status & tx_dma_own))
2674 			break;
2675 
2676 		count++;
2677 
2678 		/* Make sure descriptor fields are read after reading
2679 		 * the own bit.
2680 		 */
2681 		dma_rmb();
2682 
2683 		/* Just consider the last segment and ...*/
2684 		if (likely(!(status & tx_not_ls))) {
2685 			/* ... verify the status error condition */
2686 			if (unlikely(status & tx_err)) {
2687 				tx_errors++;
2688 				if (unlikely(status & tx_err_bump_tc))
2689 					stmmac_bump_dma_threshold(priv, queue);
2690 			} else {
2691 				tx_packets++;
2692 			}
2693 			if (skb) {
2694 				stmmac_get_tx_hwtstamp(priv, p, skb);
2695 			} else if (tx_q->xsk_pool &&
2696 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2697 				struct stmmac_xsk_tx_complete tx_compl = {
2698 					.priv = priv,
2699 					.desc = p,
2700 				};
2701 
2702 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2703 							 &stmmac_xsk_tx_metadata_ops,
2704 							 &tx_compl);
2705 			}
2706 		}
2707 
2708 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2709 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2710 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2711 				dma_unmap_page(priv->device,
2712 					       tx_q->tx_skbuff_dma[entry].buf,
2713 					       tx_q->tx_skbuff_dma[entry].len,
2714 					       DMA_TO_DEVICE);
2715 			else
2716 				dma_unmap_single(priv->device,
2717 						 tx_q->tx_skbuff_dma[entry].buf,
2718 						 tx_q->tx_skbuff_dma[entry].len,
2719 						 DMA_TO_DEVICE);
2720 			tx_q->tx_skbuff_dma[entry].buf = 0;
2721 			tx_q->tx_skbuff_dma[entry].len = 0;
2722 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2723 		}
2724 
2725 		stmmac_clean_desc3(priv, tx_q, p);
2726 
2727 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2728 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2729 
2730 		if (xdpf &&
2731 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2732 			xdp_return_frame_rx_napi(xdpf);
2733 			tx_q->xdpf[entry] = NULL;
2734 		}
2735 
2736 		if (xdpf &&
2737 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2738 			xdp_return_frame(xdpf);
2739 			tx_q->xdpf[entry] = NULL;
2740 		}
2741 
2742 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2743 			tx_q->xsk_frames_done++;
2744 
2745 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2746 			if (likely(skb)) {
2747 				pkts_compl++;
2748 				bytes_compl += skb->len;
2749 				dev_consume_skb_any(skb);
2750 				tx_q->tx_skbuff[entry] = NULL;
2751 			}
2752 		}
2753 
2754 		stmmac_release_tx_desc(priv, p, priv->mode);
2755 
2756 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2757 	}
2758 	tx_q->dirty_tx = entry;
2759 
2760 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2761 				  pkts_compl, bytes_compl);
2762 
2763 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2764 								queue))) &&
2765 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2766 
2767 		netif_dbg(priv, tx_done, priv->dev,
2768 			  "%s: restart transmit\n", __func__);
2769 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2770 	}
2771 
2772 	if (tx_q->xsk_pool) {
2773 		bool work_done;
2774 
2775 		if (tx_q->xsk_frames_done)
2776 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2777 
2778 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2779 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2780 
2781 		/* For XSK TX, we try to send as many as possible.
2782 		 * If XSK work done (XSK TX desc empty and budget still
2783 		 * available), return "budget - 1" to reenable TX IRQ.
2784 		 * Else, return "budget" to make NAPI continue polling.
2785 		 */
2786 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2787 					       STMMAC_XSK_TX_BUDGET_MAX);
2788 		if (work_done)
2789 			xmits = budget - 1;
2790 		else
2791 			xmits = budget;
2792 	}
2793 
2794 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2795 	    priv->eee_sw_timer_en) {
2796 		if (stmmac_enable_eee_mode(priv))
2797 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2798 	}
2799 
2800 	/* We still have pending packets, let's call for a new scheduling */
2801 	if (tx_q->dirty_tx != tx_q->cur_tx)
2802 		*pending_packets = true;
2803 
2804 	u64_stats_update_begin(&txq_stats->napi_syncp);
2805 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2806 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2807 	u64_stats_inc(&txq_stats->napi.tx_clean);
2808 	u64_stats_update_end(&txq_stats->napi_syncp);
2809 
2810 	priv->xstats.tx_errors += tx_errors;
2811 
2812 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2813 
2814 	/* Combine decisions from TX clean and XSK TX */
2815 	return max(count, xmits);
2816 }
2817 
2818 /**
2819  * stmmac_tx_err - to manage the tx error
2820  * @priv: driver private structure
2821  * @chan: channel index
2822  * Description: it cleans the descriptors and restarts the transmission
2823  * in case of transmission errors.
2824  */
2825 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2826 {
2827 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2828 
2829 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2830 
2831 	stmmac_stop_tx_dma(priv, chan);
2832 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2833 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2834 	stmmac_reset_tx_queue(priv, chan);
2835 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2836 			    tx_q->dma_tx_phy, chan);
2837 	stmmac_start_tx_dma(priv, chan);
2838 
2839 	priv->xstats.tx_errors++;
2840 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2841 }
2842 
2843 /**
2844  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2845  *  @priv: driver private structure
2846  *  @txmode: TX operating mode
2847  *  @rxmode: RX operating mode
2848  *  @chan: channel index
2849  *  Description: it is used for configuring the DMA operation mode at
2850  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2851  *  mode.
2852  */
2853 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2854 					  u32 rxmode, u32 chan)
2855 {
2856 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2857 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2858 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2859 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2860 	int rxfifosz = priv->plat->rx_fifo_size;
2861 	int txfifosz = priv->plat->tx_fifo_size;
2862 
2863 	if (rxfifosz == 0)
2864 		rxfifosz = priv->dma_cap.rx_fifo_size;
2865 	if (txfifosz == 0)
2866 		txfifosz = priv->dma_cap.tx_fifo_size;
2867 
2868 	/* Adjust for real per queue fifo size */
2869 	rxfifosz /= rx_channels_count;
2870 	txfifosz /= tx_channels_count;
2871 
2872 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2873 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2874 }
2875 
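/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: read the safety feature IRQ status and trigger the global
 * error recovery if an error is reported. Returns true in that case.
 */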
2876 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2877 {
2878 	int ret;
2879 
2880 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2881 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2882 	if (ret && (ret != -EINVAL)) {
2883 		stmmac_global_err(priv);
2884 		return true;
2885 	}
2886 
2887 	return false;
2888 }
2889 
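/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction (RX, TX or both)
 * Description: read the DMA interrupt status of the channel and, when RX or
 * TX work is pending, mask the corresponding interrupt and schedule the
 * matching NAPI instance (rxtx_napi when an XSK pool is in use).
 */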
2890 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2891 {
2892 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2893 						 &priv->xstats, chan, dir);
2894 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2895 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2896 	struct stmmac_channel *ch = &priv->channel[chan];
2897 	struct napi_struct *rx_napi;
2898 	struct napi_struct *tx_napi;
2899 	unsigned long flags;
2900 
2901 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2902 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2903 
2904 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2905 		if (napi_schedule_prep(rx_napi)) {
2906 			spin_lock_irqsave(&ch->lock, flags);
2907 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2908 			spin_unlock_irqrestore(&ch->lock, flags);
2909 			__napi_schedule(rx_napi);
2910 		}
2911 	}
2912 
2913 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2914 		if (napi_schedule_prep(tx_napi)) {
2915 			spin_lock_irqsave(&ch->lock, flags);
2916 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2917 			spin_unlock_irqrestore(&ch->lock, flags);
2918 			__napi_schedule(tx_napi);
2919 		}
2920 	}
2921 
2922 	return status;
2923 }
2924 
2925 /**
2926  * stmmac_dma_interrupt - DMA ISR
2927  * @priv: driver private structure
2928  * Description: this is the DMA ISR. It is called by the main ISR.
2929  * It calls the dwmac dma routine and schedules the poll method in case
2930  * some work can be done.
2931  */
2932 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2933 {
2934 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2935 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2936 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2937 				tx_channel_count : rx_channel_count;
2938 	u32 chan;
2939 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2940 
2941 	/* Make sure we never check beyond our status buffer. */
2942 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2943 		channels_to_check = ARRAY_SIZE(status);
2944 
2945 	for (chan = 0; chan < channels_to_check; chan++)
2946 		status[chan] = stmmac_napi_check(priv, chan,
2947 						 DMA_DIR_RXTX);
2948 
2949 	for (chan = 0; chan < tx_channel_count; chan++) {
2950 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2951 			/* Try to bump up the dma threshold on this failure */
2952 			stmmac_bump_dma_threshold(priv, chan);
2953 		} else if (unlikely(status[chan] == tx_hard_error)) {
2954 			stmmac_tx_err(priv, chan);
2955 		}
2956 	}
2957 }
2958 
2959 /**
2960  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2961  * @priv: driver private structure
2962  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2963  */
2964 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2965 {
2966 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2967 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2968 
2969 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2970 
2971 	if (priv->dma_cap.rmon) {
2972 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2973 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2974 	} else
2975 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2976 }
2977 
2978 /**
2979  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2980  * @priv: driver private structure
2981  * Description:
2982  *  new GMAC chip generations have a new register to indicate the
2983  *  presence of the optional feature/functions.
2984  *  This can also be used to override the value passed through the
2985  *  platform and necessary for old MAC10/100 and GMAC chips.
2986  */
2987 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2988 {
2989 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2990 }
2991 
2992 /**
2993  * stmmac_check_ether_addr - check if the MAC addr is valid
2994  * @priv: driver private structure
2995  * Description:
2996  * it verifies that the MAC address is valid; in case of failure it
2997  * generates a random MAC address
2998  */
2999 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3000 {
3001 	u8 addr[ETH_ALEN];
3002 
3003 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3004 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3005 		if (is_valid_ether_addr(addr))
3006 			eth_hw_addr_set(priv->dev, addr);
3007 		else
3008 			eth_hw_addr_random(priv->dev);
3009 		dev_info(priv->device, "device MAC address %pM\n",
3010 			 priv->dev->dev_addr);
3011 	}
3012 }
3013 
3014 /**
3015  * stmmac_init_dma_engine - DMA init.
3016  * @priv: driver private structure
3017  * Description:
3018  * It inits the DMA invoking the specific MAC/GMAC callback.
3019  * Some DMA parameters can be passed from the platform;
3020  * in case these are not passed, a default is kept for the MAC or GMAC.
3021  */
3022 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3023 {
3024 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3025 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3026 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3027 	struct stmmac_rx_queue *rx_q;
3028 	struct stmmac_tx_queue *tx_q;
3029 	u32 chan = 0;
3030 	int ret = 0;
3031 
3032 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3033 		dev_err(priv->device, "Invalid DMA configuration\n");
3034 		return -EINVAL;
3035 	}
3036 
3037 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3038 		priv->plat->dma_cfg->atds = 1;
3039 
3040 	ret = stmmac_reset(priv, priv->ioaddr);
3041 	if (ret) {
3042 		dev_err(priv->device, "Failed to reset the dma\n");
3043 		return ret;
3044 	}
3045 
3046 	/* DMA Configuration */
3047 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3048 
3049 	if (priv->plat->axi)
3050 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3051 
3052 	/* DMA CSR Channel configuration */
3053 	for (chan = 0; chan < dma_csr_ch; chan++) {
3054 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3055 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3056 	}
3057 
3058 	/* DMA RX Channel Configuration */
3059 	for (chan = 0; chan < rx_channels_count; chan++) {
3060 		rx_q = &priv->dma_conf.rx_queue[chan];
3061 
3062 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3063 				    rx_q->dma_rx_phy, chan);
3064 
3065 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3066 				     (rx_q->buf_alloc_num *
3067 				      sizeof(struct dma_desc));
3068 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3069 				       rx_q->rx_tail_addr, chan);
3070 	}
3071 
3072 	/* DMA TX Channel Configuration */
3073 	for (chan = 0; chan < tx_channels_count; chan++) {
3074 		tx_q = &priv->dma_conf.tx_queue[chan];
3075 
3076 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3077 				    tx_q->dma_tx_phy, chan);
3078 
3079 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3080 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3081 				       tx_q->tx_tail_addr, chan);
3082 	}
3083 
3084 	return ret;
3085 }
3086 
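/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: start the hrtimer used for TX coalescing unless NAPI is
 * already scheduled, in which case any pending timer is cancelled.
 */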
3087 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3088 {
3089 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3090 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3091 	struct stmmac_channel *ch;
3092 	struct napi_struct *napi;
3093 
3094 	if (!tx_coal_timer)
3095 		return;
3096 
3097 	ch = &priv->channel[tx_q->queue_index];
3098 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3099 
3100 	/* Arm timer only if napi is not already scheduled.
3101 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3102 	 * again in the next scheduled napi.
3103 	 */
3104 	if (unlikely(!napi_is_scheduled(napi)))
3105 		hrtimer_start(&tx_q->txtimer,
3106 			      STMMAC_COAL_TIMER(tx_coal_timer),
3107 			      HRTIMER_MODE_REL);
3108 	else
3109 		hrtimer_try_to_cancel(&tx_q->txtimer);
3110 }
3111 
3112 /**
3113  * stmmac_tx_timer - mitigation sw timer for tx.
3114  * @t: data pointer
3115  * Description:
3116  * This is the timer handler to directly invoke the stmmac_tx_clean.
3117  */
3118 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3119 {
3120 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3121 	struct stmmac_priv *priv = tx_q->priv_data;
3122 	struct stmmac_channel *ch;
3123 	struct napi_struct *napi;
3124 
3125 	ch = &priv->channel[tx_q->queue_index];
3126 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3127 
3128 	if (likely(napi_schedule_prep(napi))) {
3129 		unsigned long flags;
3130 
3131 		spin_lock_irqsave(&ch->lock, flags);
3132 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3133 		spin_unlock_irqrestore(&ch->lock, flags);
3134 		__napi_schedule(napi);
3135 	}
3136 
3137 	return HRTIMER_NORESTART;
3138 }
3139 
3140 /**
3141  * stmmac_init_coalesce - init mitigation options.
3142  * @priv: driver private structure
3143  * Description:
3144  * This inits the coalesce parameters: i.e. timer rate,
3145  * timer handler and default threshold used for enabling the
3146  * interrupt on completion bit.
3147  */
3148 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3149 {
3150 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3151 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3152 	u32 chan;
3153 
3154 	for (chan = 0; chan < tx_channel_count; chan++) {
3155 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3156 
3157 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3158 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3159 
3160 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3161 		tx_q->txtimer.function = stmmac_tx_timer;
3162 	}
3163 
3164 	for (chan = 0; chan < rx_channel_count; chan++)
3165 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3166 }
3167 
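/**
 * stmmac_set_rings_length - program the RX/TX ring lengths
 * @priv: driver private structure
 * Description: write the configured descriptor ring sizes to every TX and
 * RX DMA channel.
 */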
3168 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3169 {
3170 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3171 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3172 	u32 chan;
3173 
3174 	/* set TX ring length */
3175 	for (chan = 0; chan < tx_channels_count; chan++)
3176 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3177 				       (priv->dma_conf.dma_tx_size - 1), chan);
3178 
3179 	/* set RX ring length */
3180 	for (chan = 0; chan < rx_channels_count; chan++)
3181 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3182 				       (priv->dma_conf.dma_rx_size - 1), chan);
3183 }
3184 
3185 /**
3186  *  stmmac_set_tx_queue_weight - Set TX queue weight
3187  *  @priv: driver private structure
3188  *  Description: It is used for setting the TX queue weights
3189  */
3190 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3191 {
3192 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3193 	u32 weight;
3194 	u32 queue;
3195 
3196 	for (queue = 0; queue < tx_queues_count; queue++) {
3197 		weight = priv->plat->tx_queues_cfg[queue].weight;
3198 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3199 	}
3200 }
3201 
3202 /**
3203  *  stmmac_configure_cbs - Configure CBS in TX queue
3204  *  @priv: driver private structure
3205  *  Description: It is used for configuring CBS in AVB TX queues
3206  */
3207 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3208 {
3209 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3210 	u32 mode_to_use;
3211 	u32 queue;
3212 
3213 	/* queue 0 is reserved for legacy traffic */
3214 	for (queue = 1; queue < tx_queues_count; queue++) {
3215 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3216 		if (mode_to_use == MTL_QUEUE_DCB)
3217 			continue;
3218 
3219 		stmmac_config_cbs(priv, priv->hw,
3220 				priv->plat->tx_queues_cfg[queue].send_slope,
3221 				priv->plat->tx_queues_cfg[queue].idle_slope,
3222 				priv->plat->tx_queues_cfg[queue].high_credit,
3223 				priv->plat->tx_queues_cfg[queue].low_credit,
3224 				queue);
3225 	}
3226 }
3227 
3228 /**
3229  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3230  *  @priv: driver private structure
3231  *  Description: It is used for mapping RX queues to RX dma channels
3232  */
3233 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3234 {
3235 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3236 	u32 queue;
3237 	u32 chan;
3238 
3239 	for (queue = 0; queue < rx_queues_count; queue++) {
3240 		chan = priv->plat->rx_queues_cfg[queue].chan;
3241 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3247  *  @priv: driver private structure
3248  *  Description: It is used for configuring the RX Queue Priority
3249  */
3250 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3251 {
3252 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3253 	u32 queue;
3254 	u32 prio;
3255 
3256 	for (queue = 0; queue < rx_queues_count; queue++) {
3257 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3258 			continue;
3259 
3260 		prio = priv->plat->rx_queues_cfg[queue].prio;
3261 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3262 	}
3263 }
3264 
3265 /**
3266  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3267  *  @priv: driver private structure
3268  *  Description: It is used for configuring the TX Queue Priority
3269  */
3270 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3271 {
3272 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3273 	u32 queue;
3274 	u32 prio;
3275 
3276 	for (queue = 0; queue < tx_queues_count; queue++) {
3277 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3278 			continue;
3279 
3280 		prio = priv->plat->tx_queues_cfg[queue].prio;
3281 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3282 	}
3283 }
3284 
3285 /**
3286  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3287  *  @priv: driver private structure
3288  *  Description: It is used for configuring the RX queue routing
3289  */
3290 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3291 {
3292 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3293 	u32 queue;
3294 	u8 packet;
3295 
3296 	for (queue = 0; queue < rx_queues_count; queue++) {
3297 		/* no specific packet type routing specified for the queue */
3298 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3299 			continue;
3300 
3301 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3302 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3303 	}
3304 }
3305 
3306 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3307 {
3308 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3309 		priv->rss.enable = false;
3310 		return;
3311 	}
3312 
3313 	if (priv->dev->features & NETIF_F_RXHASH)
3314 		priv->rss.enable = true;
3315 	else
3316 		priv->rss.enable = false;
3317 
3318 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3319 			     priv->plat->rx_queues_to_use);
3320 }
3321 
3322 /**
3323  *  stmmac_mtl_configuration - Configure MTL
3324  *  @priv: driver private structure
3325  *  Description: It is used for configuring the MTL
3326  */
3327 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3328 {
3329 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3330 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3331 
3332 	if (tx_queues_count > 1)
3333 		stmmac_set_tx_queue_weight(priv);
3334 
3335 	/* Configure MTL RX algorithms */
3336 	if (rx_queues_count > 1)
3337 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3338 				priv->plat->rx_sched_algorithm);
3339 
3340 	/* Configure MTL TX algorithms */
3341 	if (tx_queues_count > 1)
3342 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3343 				priv->plat->tx_sched_algorithm);
3344 
3345 	/* Configure CBS in AVB TX queues */
3346 	if (tx_queues_count > 1)
3347 		stmmac_configure_cbs(priv);
3348 
3349 	/* Map RX MTL to DMA channels */
3350 	stmmac_rx_queue_dma_chan_map(priv);
3351 
3352 	/* Enable MAC RX Queues */
3353 	stmmac_mac_enable_rx_queues(priv);
3354 
3355 	/* Set RX priorities */
3356 	if (rx_queues_count > 1)
3357 		stmmac_mac_config_rx_queues_prio(priv);
3358 
3359 	/* Set TX priorities */
3360 	if (tx_queues_count > 1)
3361 		stmmac_mac_config_tx_queues_prio(priv);
3362 
3363 	/* Set RX routing */
3364 	if (rx_queues_count > 1)
3365 		stmmac_mac_config_rx_queues_routing(priv);
3366 
3367 	/* Receive Side Scaling */
3368 	if (rx_queues_count > 1)
3369 		stmmac_mac_config_rss(priv);
3370 }
3371 
3372 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3373 {
3374 	if (priv->dma_cap.asp) {
3375 		netdev_info(priv->dev, "Enabling Safety Features\n");
3376 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3377 					  priv->plat->safety_feat_cfg);
3378 	} else {
3379 		netdev_info(priv->dev, "No Safety Features support found\n");
3380 	}
3381 }
3382 
3383 /**
3384  * stmmac_hw_setup - setup mac in a usable state.
3385  *  @dev : pointer to the device structure.
3386  *  @ptp_register: register PTP if set
3387  *  Description:
3388  *  this is the main function to set up the HW in a usable state: the DMA
3389  *  engine is reset, the core registers are configured (e.g. AXI,
3390  *  checksum features, timers), and the DMA is ready to start receiving
3391  *  and transmitting.
3392  *  Return value:
3393  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3394  *  file on failure.
3395  */
3396 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3397 {
3398 	struct stmmac_priv *priv = netdev_priv(dev);
3399 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3400 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3401 	bool sph_en;
3402 	u32 chan;
3403 	int ret;
3404 
3405 	/* Make sure RX clock is enabled */
3406 	if (priv->hw->phylink_pcs)
3407 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3408 
3409 	/* DMA initialization and SW reset */
3410 	ret = stmmac_init_dma_engine(priv);
3411 	if (ret < 0) {
3412 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3413 			   __func__);
3414 		return ret;
3415 	}
3416 
3417 	/* Copy the MAC addr into the HW  */
3418 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3419 
3420 	/* PS and related bits will be programmed according to the speed */
3421 	if (priv->hw->pcs) {
3422 		int speed = priv->plat->mac_port_sel_speed;
3423 
3424 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3425 		    (speed == SPEED_1000)) {
3426 			priv->hw->ps = speed;
3427 		} else {
3428 			dev_warn(priv->device, "invalid port speed\n");
3429 			priv->hw->ps = 0;
3430 		}
3431 	}
3432 
3433 	/* Initialize the MAC Core */
3434 	stmmac_core_init(priv, priv->hw, dev);
3435 
3436 	/* Initialize MTL */
3437 	stmmac_mtl_configuration(priv);
3438 
3439 	/* Initialize Safety Features */
3440 	stmmac_safety_feat_configuration(priv);
3441 
3442 	ret = stmmac_rx_ipc(priv, priv->hw);
3443 	if (!ret) {
3444 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3445 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3446 		priv->hw->rx_csum = 0;
3447 	}
3448 
3449 	/* Enable the MAC Rx/Tx */
3450 	stmmac_mac_set(priv, priv->ioaddr, true);
3451 
3452 	/* Set the HW DMA mode and the COE */
3453 	stmmac_dma_operation_mode(priv);
3454 
3455 	stmmac_mmc_setup(priv);
3456 
3457 	if (ptp_register) {
3458 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3459 		if (ret < 0)
3460 			netdev_warn(priv->dev,
3461 				    "failed to enable PTP reference clock: %pe\n",
3462 				    ERR_PTR(ret));
3463 	}
3464 
3465 	ret = stmmac_init_ptp(priv);
3466 	if (ret == -EOPNOTSUPP)
3467 		netdev_info(priv->dev, "PTP not supported by HW\n");
3468 	else if (ret)
3469 		netdev_warn(priv->dev, "PTP init failed\n");
3470 	else if (ptp_register)
3471 		stmmac_ptp_register(priv);
3472 
3473 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3474 
3475 	/* Convert the timer from msec to usec */
3476 	if (!priv->tx_lpi_timer)
3477 		priv->tx_lpi_timer = eee_timer * 1000;
3478 
3479 	if (priv->use_riwt) {
3480 		u32 queue;
3481 
3482 		for (queue = 0; queue < rx_cnt; queue++) {
3483 			if (!priv->rx_riwt[queue])
3484 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3485 
3486 			stmmac_rx_watchdog(priv, priv->ioaddr,
3487 					   priv->rx_riwt[queue], queue);
3488 		}
3489 	}
3490 
3491 	if (priv->hw->pcs)
3492 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3493 
3494 	/* set TX and RX rings length */
3495 	stmmac_set_rings_length(priv);
3496 
3497 	/* Enable TSO */
3498 	if (priv->tso) {
3499 		for (chan = 0; chan < tx_cnt; chan++) {
3500 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3501 
3502 			/* TSO and TBS cannot co-exist */
3503 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3504 				continue;
3505 
3506 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3507 		}
3508 	}
3509 
3510 	/* Enable Split Header */
3511 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3512 	for (chan = 0; chan < rx_cnt; chan++)
3513 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3514 
3515 
3516 	/* VLAN Tag Insertion */
3517 	if (priv->dma_cap.vlins)
3518 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3519 
3520 	/* TBS */
3521 	for (chan = 0; chan < tx_cnt; chan++) {
3522 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3523 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3524 
3525 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3526 	}
3527 
3528 	/* Configure real RX and TX queues */
3529 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3530 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3531 
3532 	/* Start the ball rolling... */
3533 	stmmac_start_all_dma(priv);
3534 
3535 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3536 
3537 	return 0;
3538 }
3539 
3540 static void stmmac_hw_teardown(struct net_device *dev)
3541 {
3542 	struct stmmac_priv *priv = netdev_priv(dev);
3543 
3544 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3545 }
3546 
3547 static void stmmac_free_irq(struct net_device *dev,
3548 			    enum request_irq_err irq_err, int irq_idx)
3549 {
3550 	struct stmmac_priv *priv = netdev_priv(dev);
3551 	int j;
3552 
3553 	switch (irq_err) {
3554 	case REQ_IRQ_ERR_ALL:
3555 		irq_idx = priv->plat->tx_queues_to_use;
3556 		fallthrough;
3557 	case REQ_IRQ_ERR_TX:
3558 		for (j = irq_idx - 1; j >= 0; j--) {
3559 			if (priv->tx_irq[j] > 0) {
3560 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3561 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3562 			}
3563 		}
3564 		irq_idx = priv->plat->rx_queues_to_use;
3565 		fallthrough;
3566 	case REQ_IRQ_ERR_RX:
3567 		for (j = irq_idx - 1; j >= 0; j--) {
3568 			if (priv->rx_irq[j] > 0) {
3569 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3570 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3571 			}
3572 		}
3573 
3574 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3575 			free_irq(priv->sfty_ue_irq, dev);
3576 		fallthrough;
3577 	case REQ_IRQ_ERR_SFTY_UE:
3578 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3579 			free_irq(priv->sfty_ce_irq, dev);
3580 		fallthrough;
3581 	case REQ_IRQ_ERR_SFTY_CE:
3582 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3583 			free_irq(priv->lpi_irq, dev);
3584 		fallthrough;
3585 	case REQ_IRQ_ERR_LPI:
3586 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3587 			free_irq(priv->wol_irq, dev);
3588 		fallthrough;
3589 	case REQ_IRQ_ERR_SFTY:
3590 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3591 			free_irq(priv->sfty_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_WOL:
3594 		free_irq(dev->irq, dev);
3595 		fallthrough;
3596 	case REQ_IRQ_ERR_MAC:
3597 	case REQ_IRQ_ERR_NO:
3598 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3599 		break;
3600 	}
3601 }
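
/*
 * Illustrative sketch (not part of the driver): the fallthrough-based
 * unwind used by stmmac_free_irq() above, reduced to two stages. The enum
 * and helpers are hypothetical; the idea is that entering the switch at
 * the stage that failed releases, in reverse order, everything that was
 * acquired before it.
 */
enum sketch_req_err { SKETCH_ERR_ALL, SKETCH_ERR_B, SKETCH_ERR_A, SKETCH_ERR_NO };

static void sketch_release_a(void) { }
static void sketch_release_b(void) { }

static void sketch_unwind(enum sketch_req_err failed)
{
	switch (failed) {
	case SKETCH_ERR_ALL:	/* full teardown: release B then A */
		sketch_release_b();
		fallthrough;
	case SKETCH_ERR_B:	/* stage B failed: only A was acquired */
		sketch_release_a();
		fallthrough;
	case SKETCH_ERR_A:	/* stage A failed: nothing to release */
	case SKETCH_ERR_NO:
		break;
	}
}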
3602 
3603 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 	enum request_irq_err irq_err;
3607 	int irq_idx = 0;
3608 	char *int_name;
3609 	int ret;
3610 	int i;
3611 
3612 	/* For common interrupt */
3613 	int_name = priv->int_name_mac;
3614 	sprintf(int_name, "%s:%s", dev->name, "mac");
3615 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3616 			  0, int_name, dev);
3617 	if (unlikely(ret < 0)) {
3618 		netdev_err(priv->dev,
3619 			   "%s: alloc mac MSI %d (error: %d)\n",
3620 			   __func__, dev->irq, ret);
3621 		irq_err = REQ_IRQ_ERR_MAC;
3622 		goto irq_error;
3623 	}
3624 
3625 	/* Request the Wake IRQ in case another line
3626 	 * is used for WoL
3627 	 */
3628 	priv->wol_irq_disabled = true;
3629 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3630 		int_name = priv->int_name_wol;
3631 		sprintf(int_name, "%s:%s", dev->name, "wol");
3632 		ret = request_irq(priv->wol_irq,
3633 				  stmmac_mac_interrupt,
3634 				  0, int_name, dev);
3635 		if (unlikely(ret < 0)) {
3636 			netdev_err(priv->dev,
3637 				   "%s: alloc wol MSI %d (error: %d)\n",
3638 				   __func__, priv->wol_irq, ret);
3639 			irq_err = REQ_IRQ_ERR_WOL;
3640 			goto irq_error;
3641 		}
3642 	}
3643 
3644 	/* Request the LPI IRQ in case another line
3645 	 * is used for LPI
3646 	 */
3647 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3648 		int_name = priv->int_name_lpi;
3649 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3650 		ret = request_irq(priv->lpi_irq,
3651 				  stmmac_mac_interrupt,
3652 				  0, int_name, dev);
3653 		if (unlikely(ret < 0)) {
3654 			netdev_err(priv->dev,
3655 				   "%s: alloc lpi MSI %d (error: %d)\n",
3656 				   __func__, priv->lpi_irq, ret);
3657 			irq_err = REQ_IRQ_ERR_LPI;
3658 			goto irq_error;
3659 		}
3660 	}
3661 
3662 	/* Request the common Safety Feature Correctable/Uncorrectable
3663 	 * Error line in case another line is used
3664 	 */
3665 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3666 		int_name = priv->int_name_sfty;
3667 		sprintf(int_name, "%s:%s", dev->name, "safety");
3668 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3669 				  0, int_name, dev);
3670 		if (unlikely(ret < 0)) {
3671 			netdev_err(priv->dev,
3672 				   "%s: alloc sfty MSI %d (error: %d)\n",
3673 				   __func__, priv->sfty_irq, ret);
3674 			irq_err = REQ_IRQ_ERR_SFTY;
3675 			goto irq_error;
3676 		}
3677 	}
3678 
3679 	/* Request the Safety Feature Correctable Error line in
3680 	 * case another line is used
3681 	 */
3682 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3683 		int_name = priv->int_name_sfty_ce;
3684 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3685 		ret = request_irq(priv->sfty_ce_irq,
3686 				  stmmac_safety_interrupt,
3687 				  0, int_name, dev);
3688 		if (unlikely(ret < 0)) {
3689 			netdev_err(priv->dev,
3690 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3691 				   __func__, priv->sfty_ce_irq, ret);
3692 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3693 			goto irq_error;
3694 		}
3695 	}
3696 
3697 	/* Request the Safety Feature Uncorrectable Error line in
3698 	 * case another line is used
3699 	 */
3700 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3701 		int_name = priv->int_name_sfty_ue;
3702 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3703 		ret = request_irq(priv->sfty_ue_irq,
3704 				  stmmac_safety_interrupt,
3705 				  0, int_name, dev);
3706 		if (unlikely(ret < 0)) {
3707 			netdev_err(priv->dev,
3708 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3709 				   __func__, priv->sfty_ue_irq, ret);
3710 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3711 			goto irq_error;
3712 		}
3713 	}
3714 
3715 	/* Request Rx MSI irq */
3716 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3717 		if (i >= MTL_MAX_RX_QUEUES)
3718 			break;
3719 		if (priv->rx_irq[i] == 0)
3720 			continue;
3721 
3722 		int_name = priv->int_name_rx_irq[i];
3723 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3724 		ret = request_irq(priv->rx_irq[i],
3725 				  stmmac_msi_intr_rx,
3726 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3727 		if (unlikely(ret < 0)) {
3728 			netdev_err(priv->dev,
3729 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3730 				   __func__, i, priv->rx_irq[i], ret);
3731 			irq_err = REQ_IRQ_ERR_RX;
3732 			irq_idx = i;
3733 			goto irq_error;
3734 		}
3735 		irq_set_affinity_hint(priv->rx_irq[i],
3736 				      cpumask_of(i % num_online_cpus()));
3737 	}
3738 
3739 	/* Request Tx MSI irq */
3740 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3741 		if (i >= MTL_MAX_TX_QUEUES)
3742 			break;
3743 		if (priv->tx_irq[i] == 0)
3744 			continue;
3745 
3746 		int_name = priv->int_name_tx_irq[i];
3747 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3748 		ret = request_irq(priv->tx_irq[i],
3749 				  stmmac_msi_intr_tx,
3750 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3751 		if (unlikely(ret < 0)) {
3752 			netdev_err(priv->dev,
3753 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3754 				   __func__, i, priv->tx_irq[i], ret);
3755 			irq_err = REQ_IRQ_ERR_TX;
3756 			irq_idx = i;
3757 			goto irq_error;
3758 		}
3759 		irq_set_affinity_hint(priv->tx_irq[i],
3760 				      cpumask_of(i % num_online_cpus()));
3761 	}
3762 
3763 	return 0;
3764 
3765 irq_error:
3766 	stmmac_free_irq(dev, irq_err, irq_idx);
3767 	return ret;
3768 }
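
/*
 * Illustrative sketch (not part of the driver): the affinity policy used
 * above for the per-queue MSI vectors, i.e. hinting queue N onto CPU
 * (N mod nr_online_cpus). "stmmac_sketch_hint_affinity" is a hypothetical
 * helper name.
 */
static void stmmac_sketch_hint_affinity(unsigned int irq, int queue)
{
	/* A hint only: the IRQ core may still place the handler elsewhere
	 * if the requested CPU goes offline.
	 */
	irq_set_affinity_hint(irq, cpumask_of(queue % num_online_cpus()));
}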
3769 
3770 static int stmmac_request_irq_single(struct net_device *dev)
3771 {
3772 	struct stmmac_priv *priv = netdev_priv(dev);
3773 	enum request_irq_err irq_err;
3774 	int ret;
3775 
3776 	ret = request_irq(dev->irq, stmmac_interrupt,
3777 			  IRQF_SHARED, dev->name, dev);
3778 	if (unlikely(ret < 0)) {
3779 		netdev_err(priv->dev,
3780 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3781 			   __func__, dev->irq, ret);
3782 		irq_err = REQ_IRQ_ERR_MAC;
3783 		goto irq_error;
3784 	}
3785 
3786 	/* Request the Wake IRQ in case another line
3787 	 * is used for WoL
3788 	 */
3789 	priv->wol_irq_disabled = true;
3790 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3791 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3792 				  IRQF_SHARED, dev->name, dev);
3793 		if (unlikely(ret < 0)) {
3794 			netdev_err(priv->dev,
3795 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3796 				   __func__, priv->wol_irq, ret);
3797 			irq_err = REQ_IRQ_ERR_WOL;
3798 			goto irq_error;
3799 		}
3800 	}
3801 
3802 	/* Request the LPI IRQ in case another line is used for LPI */
3803 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3804 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3805 				  IRQF_SHARED, dev->name, dev);
3806 		if (unlikely(ret < 0)) {
3807 			netdev_err(priv->dev,
3808 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3809 				   __func__, priv->lpi_irq, ret);
3810 			irq_err = REQ_IRQ_ERR_LPI;
3811 			goto irq_error;
3812 		}
3813 	}
3814 
3815 	/* Request the common Safety Feature Correctable/Uncorrectable
3816 	 * Error line in case another line is used
3817 	 */
3818 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3819 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3820 				  IRQF_SHARED, dev->name, dev);
3821 		if (unlikely(ret < 0)) {
3822 			netdev_err(priv->dev,
3823 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3824 				   __func__, priv->sfty_irq, ret);
3825 			irq_err = REQ_IRQ_ERR_SFTY;
3826 			goto irq_error;
3827 		}
3828 	}
3829 
3830 	return 0;
3831 
3832 irq_error:
3833 	stmmac_free_irq(dev, irq_err, 0);
3834 	return ret;
3835 }
3836 
3837 static int stmmac_request_irq(struct net_device *dev)
3838 {
3839 	struct stmmac_priv *priv = netdev_priv(dev);
3840 	int ret;
3841 
3842 	/* Request the IRQ lines */
3843 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3844 		ret = stmmac_request_irq_multi_msi(dev);
3845 	else
3846 		ret = stmmac_request_irq_single(dev);
3847 
3848 	return ret;
3849 }
3850 
3851 /**
3852  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3853  *  @priv: driver private structure
3854  *  @mtu: MTU to setup the dma queue and buf with
3855  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3856  *  Allocate the Tx/Rx DMA queues and initialize them.
3857  *  Return value:
3858  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3859  */
3860 static struct stmmac_dma_conf *
3861 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3862 {
3863 	struct stmmac_dma_conf *dma_conf;
3864 	int chan, bfsize, ret;
3865 
3866 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3867 	if (!dma_conf) {
3868 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3869 			   __func__);
3870 		return ERR_PTR(-ENOMEM);
3871 	}
3872 
3873 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3874 	if (bfsize < 0)
3875 		bfsize = 0;
3876 
3877 	if (bfsize < BUF_SIZE_16KiB)
3878 		bfsize = stmmac_set_bfsize(mtu, 0);
3879 
3880 	dma_conf->dma_buf_sz = bfsize;
3881 	/* Choose the tx/rx size from the one already defined in the
3882 	 * priv struct, if any.
3883 	 */
3884 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3885 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3886 
3887 	if (!dma_conf->dma_tx_size)
3888 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3889 	if (!dma_conf->dma_rx_size)
3890 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3891 
3892 	/* Earlier check for TBS */
3893 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3894 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3895 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3896 
3897 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3898 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3899 	}
3900 
3901 	ret = alloc_dma_desc_resources(priv, dma_conf);
3902 	if (ret < 0) {
3903 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3904 			   __func__);
3905 		goto alloc_error;
3906 	}
3907 
3908 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3909 	if (ret < 0) {
3910 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3911 			   __func__);
3912 		goto init_error;
3913 	}
3914 
3915 	return dma_conf;
3916 
3917 init_error:
3918 	free_dma_desc_resources(priv, dma_conf);
3919 alloc_error:
3920 	kfree(dma_conf);
3921 	return ERR_PTR(ret);
3922 }
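
/*
 * Illustrative sketch (not part of the driver): the MTU-to-RX-buffer-size
 * rounding performed via stmmac_set_bfsize() above, simplified to the
 * thresholds only. The real helper also honours a caller-provided
 * fallback size.
 */
static int stmmac_sketch_mtu_to_bfsize(int mtu)
{
	if (mtu >= BUF_SIZE_8KiB)
		return BUF_SIZE_16KiB;
	if (mtu >= BUF_SIZE_4KiB)
		return BUF_SIZE_8KiB;
	if (mtu >= DEFAULT_BUFSIZE)
		return BUF_SIZE_4KiB;
	return DEFAULT_BUFSIZE;
}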
3923 
3924 /**
3925  *  __stmmac_open - open entry point of the driver
3926  *  @dev : pointer to the device structure.
3927  *  @dma_conf: the DMA configuration to use for this open
3928  *  Description:
3929  *  This function is the open entry point of the driver.
3930  *  Return value:
3931  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3932  *  file on failure.
3933  */
3934 static int __stmmac_open(struct net_device *dev,
3935 			 struct stmmac_dma_conf *dma_conf)
3936 {
3937 	struct stmmac_priv *priv = netdev_priv(dev);
3938 	int mode = priv->plat->phy_interface;
3939 	u32 chan;
3940 	int ret;
3941 
3942 	ret = pm_runtime_resume_and_get(priv->device);
3943 	if (ret < 0)
3944 		return ret;
3945 
3946 	if ((!priv->hw->xpcs ||
3947 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3948 		ret = stmmac_init_phy(dev);
3949 		if (ret) {
3950 			netdev_err(priv->dev,
3951 				   "%s: Cannot attach to PHY (error: %d)\n",
3952 				   __func__, ret);
3953 			goto init_phy_error;
3954 		}
3955 	}
3956 
3957 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3958 
3959 	buf_sz = dma_conf->dma_buf_sz;
3960 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3961 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3962 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3963 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3964 
3965 	stmmac_reset_queues_param(priv);
3966 
3967 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3968 	    priv->plat->serdes_powerup) {
3969 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3970 		if (ret < 0) {
3971 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3972 				   __func__);
3973 			goto init_error;
3974 		}
3975 	}
3976 
3977 	ret = stmmac_hw_setup(dev, true);
3978 	if (ret < 0) {
3979 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3980 		goto init_error;
3981 	}
3982 
3983 	stmmac_init_coalesce(priv);
3984 
3985 	phylink_start(priv->phylink);
3986 	/* We may have called phylink_speed_down before */
3987 	phylink_speed_up(priv->phylink);
3988 
3989 	ret = stmmac_request_irq(dev);
3990 	if (ret)
3991 		goto irq_error;
3992 
3993 	stmmac_enable_all_queues(priv);
3994 	netif_tx_start_all_queues(priv->dev);
3995 	stmmac_enable_all_dma_irq(priv);
3996 
3997 	return 0;
3998 
3999 irq_error:
4000 	phylink_stop(priv->phylink);
4001 
4002 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4003 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4004 
4005 	stmmac_hw_teardown(dev);
4006 init_error:
4007 	phylink_disconnect_phy(priv->phylink);
4008 init_phy_error:
4009 	pm_runtime_put(priv->device);
4010 	return ret;
4011 }
4012 
4013 static int stmmac_open(struct net_device *dev)
4014 {
4015 	struct stmmac_priv *priv = netdev_priv(dev);
4016 	struct stmmac_dma_conf *dma_conf;
4017 	int ret;
4018 
4019 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4020 	if (IS_ERR(dma_conf))
4021 		return PTR_ERR(dma_conf);
4022 
4023 	ret = __stmmac_open(dev, dma_conf);
4024 	if (ret)
4025 		free_dma_desc_resources(priv, dma_conf);
4026 
4027 	kfree(dma_conf);
4028 	return ret;
4029 }
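
/*
 * Illustrative sketch (not part of the driver): the allocate-then-commit
 * pattern used by stmmac_open() above, in generic form. A detached conf
 * object is fully built first, and the live state is only updated once
 * every step that can fail has succeeded. All names here are hypothetical.
 */
struct sketch_conf {
	int ring_size;
};

static struct sketch_conf *sketch_build_conf(int ring_size)
{
	struct sketch_conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->ring_size = ring_size;
	return conf;
}

static int sketch_open(struct sketch_conf *live, int ring_size)
{
	struct sketch_conf *conf = sketch_build_conf(ring_size);

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	*live = *conf;		/* commit only after successful setup */
	kfree(conf);
	return 0;
}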
4030 
4031 /**
4032  *  stmmac_release - close entry point of the driver
4033  *  @dev : device pointer.
4034  *  Description:
4035  *  This is the stop entry point of the driver.
4036  */
4037 static int stmmac_release(struct net_device *dev)
4038 {
4039 	struct stmmac_priv *priv = netdev_priv(dev);
4040 	u32 chan;
4041 
4042 	if (device_may_wakeup(priv->device))
4043 		phylink_speed_down(priv->phylink, false);
4044 	/* Stop and disconnect the PHY */
4045 	phylink_stop(priv->phylink);
4046 	phylink_disconnect_phy(priv->phylink);
4047 
4048 	stmmac_disable_all_queues(priv);
4049 
4050 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4051 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4052 
4053 	netif_tx_disable(dev);
4054 
4055 	/* Free the IRQ lines */
4056 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4057 
4058 	if (priv->eee_enabled) {
4059 		priv->tx_path_in_lpi_mode = false;
4060 		del_timer_sync(&priv->eee_ctrl_timer);
4061 	}
4062 
4063 	/* Stop TX/RX DMA and clear the descriptors */
4064 	stmmac_stop_all_dma(priv);
4065 
4066 	/* Release and free the Rx/Tx resources */
4067 	free_dma_desc_resources(priv, &priv->dma_conf);
4068 
4069 	/* Disable the MAC Rx/Tx */
4070 	stmmac_mac_set(priv, priv->ioaddr, false);
4071 
4072 	/* Powerdown Serdes if there is */
4073 	if (priv->plat->serdes_powerdown)
4074 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4075 
4076 	stmmac_release_ptp(priv);
4077 
4078 	if (priv->dma_cap.fpesel)
4079 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4080 
4081 	pm_runtime_put(priv->device);
4082 
4083 	return 0;
4084 }
4085 
4086 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4087 			       struct stmmac_tx_queue *tx_q)
4088 {
4089 	u16 tag = 0x0, inner_tag = 0x0;
4090 	u32 inner_type = 0x0;
4091 	struct dma_desc *p;
4092 
4093 	if (!priv->dma_cap.vlins)
4094 		return false;
4095 	if (!skb_vlan_tag_present(skb))
4096 		return false;
4097 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4098 		inner_tag = skb_vlan_tag_get(skb);
4099 		inner_type = STMMAC_VLAN_INSERT;
4100 	}
4101 
4102 	tag = skb_vlan_tag_get(skb);
4103 
4104 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4105 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4106 	else
4107 		p = &tx_q->dma_tx[tx_q->cur_tx];
4108 
4109 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4110 		return false;
4111 
4112 	stmmac_set_tx_owner(priv, p);
4113 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4114 	return true;
4115 }
4116 
4117 /**
4118  *  stmmac_tso_allocator - Allocate and fill TSO descriptors for a buffer
4119  *  @priv: driver private structure
4120  *  @des: buffer start address
4121  *  @total_len: total length to fill in descriptors
4122  *  @last_segment: condition for the last descriptor
4123  *  @queue: TX queue index
4124  *  Description:
4125  *  This function fills descriptor and request new descriptors according to
4126  *  buffer length to fill
4127  */
4128 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4129 				 int total_len, bool last_segment, u32 queue)
4130 {
4131 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4132 	struct dma_desc *desc;
4133 	u32 buff_size;
4134 	int tmp_len;
4135 
4136 	tmp_len = total_len;
4137 
4138 	while (tmp_len > 0) {
4139 		dma_addr_t curr_addr;
4140 
4141 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4142 						priv->dma_conf.dma_tx_size);
4143 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4144 
4145 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4146 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4147 		else
4148 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4149 
4150 		curr_addr = des + (total_len - tmp_len);
4151 		if (priv->dma_cap.addr64 <= 32)
4152 			desc->des0 = cpu_to_le32(curr_addr);
4153 		else
4154 			stmmac_set_desc_addr(priv, desc, curr_addr);
4155 
4156 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4157 			    TSO_MAX_BUFF_SIZE : tmp_len;
4158 
4159 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4160 				0, 1,
4161 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4162 				0, 0);
4163 
4164 		tmp_len -= TSO_MAX_BUFF_SIZE;
4165 	}
4166 }
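
/*
 * Illustrative sketch (not part of the driver): the number of descriptors
 * consumed by stmmac_tso_allocator() above for a given payload, i.e. one
 * per TSO_MAX_BUFF_SIZE chunk. "stmmac_sketch_tso_ndesc" is a hypothetical
 * helper; it is roughly the quantity that stmmac_tso_xmit() checks for
 * availability before queueing the frame.
 */
static int stmmac_sketch_tso_ndesc(int payload_len)
{
	return DIV_ROUND_UP(payload_len, TSO_MAX_BUFF_SIZE);
}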
4167 
4168 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4169 {
4170 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4171 	int desc_size;
4172 
4173 	if (likely(priv->extend_desc))
4174 		desc_size = sizeof(struct dma_extended_desc);
4175 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4176 		desc_size = sizeof(struct dma_edesc);
4177 	else
4178 		desc_size = sizeof(struct dma_desc);
4179 
4180 	/* The own bit must be the last thing written when preparing the
4181 	 * descriptor, and a barrier is then needed to make sure that
4182 	 * everything is coherent before granting the DMA engine.
4183 	 */
4184 	wmb();
4185 
4186 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4187 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4188 }
4189 
4190 /**
4191  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4192  *  @skb : the socket buffer
4193  *  @dev : device pointer
4194  *  Description: this is the transmit function that is called on TSO frames
4195  *  (support available on GMAC4 and newer chips).
4196  *  Diagram below show the ring programming in case of TSO frames:
4197  *
4198  *  First Descriptor
4199  *   --------
4200  *   | DES0 |---> buffer1 = L2/L3/L4 header
4201  *   | DES1 |---> TCP Payload (can continue on next descr...)
4202  *   | DES2 |---> buffer 1 and 2 len
4203  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4204  *   --------
4205  *	|
4206  *     ...
4207  *	|
4208  *   --------
4209  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4210  *   | DES1 | --|
4211  *   | DES2 | --> buffer 1 and 2 len
4212  *   | DES3 |
4213  *   --------
4214  *
4215  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is not reprogrammed per packet.
4216  */
4217 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4218 {
4219 	struct dma_desc *desc, *first, *mss_desc = NULL;
4220 	struct stmmac_priv *priv = netdev_priv(dev);
4221 	int tmp_pay_len = 0, first_tx, nfrags;
4222 	unsigned int first_entry, tx_packets;
4223 	struct stmmac_txq_stats *txq_stats;
4224 	struct stmmac_tx_queue *tx_q;
4225 	u32 pay_len, mss, queue;
4226 	dma_addr_t tso_des, des;
4227 	u8 proto_hdr_len, hdr;
4228 	bool set_ic;
4229 	int i;
4230 
4231 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4232 	 *
4233 	 * Never let the HW insert the VLAN tag, since segments split by the
4234 	 * TSO engine would be un-tagged by mistake.
4235 	 */
4236 	if (skb_vlan_tag_present(skb)) {
4237 		skb = __vlan_hwaccel_push_inside(skb);
4238 		if (unlikely(!skb)) {
4239 			priv->xstats.tx_dropped++;
4240 			return NETDEV_TX_OK;
4241 		}
4242 	}
4243 
4244 	nfrags = skb_shinfo(skb)->nr_frags;
4245 	queue = skb_get_queue_mapping(skb);
4246 
4247 	tx_q = &priv->dma_conf.tx_queue[queue];
4248 	txq_stats = &priv->xstats.txq_stats[queue];
4249 	first_tx = tx_q->cur_tx;
4250 
4251 	/* Compute header lengths */
4252 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4253 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4254 		hdr = sizeof(struct udphdr);
4255 	} else {
4256 		proto_hdr_len = skb_tcp_all_headers(skb);
4257 		hdr = tcp_hdrlen(skb);
4258 	}
4259 
4260 	/* Desc availability based on the threshold should be safe enough */
4261 	if (unlikely(stmmac_tx_avail(priv, queue) <
4262 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4263 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4264 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4265 								queue));
4266 			/* This is a hard error, log it. */
4267 			netdev_err(priv->dev,
4268 				   "%s: Tx Ring full when queue awake\n",
4269 				   __func__);
4270 		}
4271 		return NETDEV_TX_BUSY;
4272 	}
4273 
4274 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4275 
4276 	mss = skb_shinfo(skb)->gso_size;
4277 
4278 	/* set new MSS value if needed */
4279 	if (mss != tx_q->mss) {
4280 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4281 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4282 		else
4283 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4284 
4285 		stmmac_set_mss(priv, mss_desc, mss);
4286 		tx_q->mss = mss;
4287 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4288 						priv->dma_conf.dma_tx_size);
4289 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4290 	}
4291 
4292 	if (netif_msg_tx_queued(priv)) {
4293 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4294 			__func__, hdr, proto_hdr_len, pay_len, mss);
4295 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4296 			skb->data_len);
4297 	}
4298 
4299 	first_entry = tx_q->cur_tx;
4300 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4301 
4302 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4303 		desc = &tx_q->dma_entx[first_entry].basic;
4304 	else
4305 		desc = &tx_q->dma_tx[first_entry];
4306 	first = desc;
4307 
4308 	/* first descriptor: fill Headers on Buf1 */
4309 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4310 			     DMA_TO_DEVICE);
4311 	if (dma_mapping_error(priv->device, des))
4312 		goto dma_map_err;
4313 
4314 	if (priv->dma_cap.addr64 <= 32) {
4315 		first->des0 = cpu_to_le32(des);
4316 
4317 		/* Fill start of payload in buff2 of first descriptor */
4318 		if (pay_len)
4319 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4320 
4321 		/* If needed take extra descriptors to fill the remaining payload */
4322 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4323 		tso_des = des;
4324 	} else {
4325 		stmmac_set_desc_addr(priv, first, des);
4326 		tmp_pay_len = pay_len;
4327 		tso_des = des + proto_hdr_len;
4328 		pay_len = 0;
4329 	}
4330 
4331 	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4332 
4333 	/* In case two or more DMA transmit descriptors are allocated for this
4334 	 * non-paged SKB data, the DMA buffer address should be saved to
4335 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4336 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4337 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4338 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4339 	 * sooner or later.
4340 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4341 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4342 	 * this DMA buffer right after the DMA engine completely finishes the
4343 	 * full buffer transmission.
4344 	 */
4345 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4346 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4347 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4348 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4349 
4350 	/* Prepare fragments */
4351 	for (i = 0; i < nfrags; i++) {
4352 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4353 
4354 		des = skb_frag_dma_map(priv->device, frag, 0,
4355 				       skb_frag_size(frag),
4356 				       DMA_TO_DEVICE);
4357 		if (dma_mapping_error(priv->device, des))
4358 			goto dma_map_err;
4359 
4360 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4361 				     (i == nfrags - 1), queue);
4362 
4363 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4364 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4365 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4366 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4367 	}
4368 
4369 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4370 
4371 	/* Only the last descriptor gets to point to the skb. */
4372 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4373 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4374 
4375 	/* Manage tx mitigation */
4376 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4377 	tx_q->tx_count_frames += tx_packets;
4378 
4379 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4380 		set_ic = true;
4381 	else if (!priv->tx_coal_frames[queue])
4382 		set_ic = false;
4383 	else if (tx_packets > priv->tx_coal_frames[queue])
4384 		set_ic = true;
4385 	else if ((tx_q->tx_count_frames %
4386 		  priv->tx_coal_frames[queue]) < tx_packets)
4387 		set_ic = true;
4388 	else
4389 		set_ic = false;
4390 
4391 	if (set_ic) {
4392 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4393 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4394 		else
4395 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4396 
4397 		tx_q->tx_count_frames = 0;
4398 		stmmac_set_tx_ic(priv, desc);
4399 	}
4400 
4401 	/* We've used all descriptors we need for this skb, however,
4402 	 * advance cur_tx so that it references a fresh descriptor.
4403 	 * ndo_start_xmit will fill this descriptor the next time it's
4404 	 * called and stmmac_tx_clean may clean up to this descriptor.
4405 	 */
4406 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4407 
4408 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4409 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4410 			  __func__);
4411 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4412 	}
4413 
4414 	u64_stats_update_begin(&txq_stats->q_syncp);
4415 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4416 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4417 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4418 	if (set_ic)
4419 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4420 	u64_stats_update_end(&txq_stats->q_syncp);
4421 
4422 	if (priv->sarc_type)
4423 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4424 
4425 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4426 		     priv->hwts_tx_en)) {
4427 		/* declare that device is doing timestamping */
4428 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4429 		stmmac_enable_tx_timestamp(priv, first);
4430 	}
4431 
4432 	/* Complete the first descriptor before granting the DMA */
4433 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4434 			proto_hdr_len,
4435 			pay_len,
4436 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4437 			hdr / 4, (skb->len - proto_hdr_len));
4438 
4439 	/* If context desc is used to change MSS */
4440 	if (mss_desc) {
4441 		/* Make sure that first descriptor has been completely
4442 		 * written, including its own bit. This is because MSS is
4443 		 * actually before first descriptor, so we need to make
4444 		 * sure that MSS's own bit is the last thing written.
4445 		 */
4446 		dma_wmb();
4447 		stmmac_set_tx_owner(priv, mss_desc);
4448 	}
4449 
4450 	if (netif_msg_pktdata(priv)) {
4451 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4452 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4453 			tx_q->cur_tx, first, nfrags);
4454 		pr_info(">>> frame to be transmitted: ");
4455 		print_pkt(skb->data, skb_headlen(skb));
4456 	}
4457 
4458 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4459 	skb_tx_timestamp(skb);
4460 
4461 	stmmac_flush_tx_descriptors(priv, queue);
4462 	stmmac_tx_timer_arm(priv, queue);
4463 
4464 	return NETDEV_TX_OK;
4465 
4466 dma_map_err:
4467 	dev_err(priv->device, "Tx dma map failed\n");
4468 	dev_kfree_skb(skb);
4469 	priv->xstats.tx_dropped++;
4470 	return NETDEV_TX_OK;
4471 }
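
/*
 * Illustrative sketch (not part of the driver): the TX interrupt
 * coalescing decision used by the xmit paths above, reduced to a pure
 * function. An interrupt-on-completion bit is requested whenever the
 * running frame counter (already incremented by this burst) crosses a
 * multiple of the per-queue tx_coal_frames threshold. The hardware
 * timestamping case, which always requests an interrupt, is left out.
 */
static bool stmmac_sketch_want_tx_irq(u32 count_frames, u32 tx_packets,
				      u32 coal_frames)
{
	if (!coal_frames)
		return false;
	if (tx_packets > coal_frames)
		return true;
	return (count_frames % coal_frames) < tx_packets;
}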
4472 
4473 /**
4474  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4475  * @skb: socket buffer to check
4476  *
4477  * Check if a packet has an ethertype that will trigger the IP header checks
4478  * and IP/TCP checksum engine of the stmmac core.
4479  *
4480  * Return: true if the ethertype can trigger the checksum engine, false
4481  * otherwise
4482  */
4483 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4484 {
4485 	int depth = 0;
4486 	__be16 proto;
4487 
4488 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4489 				    &depth);
4490 
4491 	return (depth <= ETH_HLEN) &&
4492 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4493 }
4494 
4495 /**
4496  *  stmmac_xmit - Tx entry point of the driver
4497  *  @skb : the socket buffer
4498  *  @dev : device pointer
4499  *  Description : this is the tx entry point of the driver.
4500  *  It programs the chain or the ring and supports oversized frames
4501  *  and SG feature.
4502  */
4503 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4504 {
4505 	unsigned int first_entry, tx_packets, enh_desc;
4506 	struct stmmac_priv *priv = netdev_priv(dev);
4507 	unsigned int nopaged_len = skb_headlen(skb);
4508 	int i, csum_insertion = 0, is_jumbo = 0;
4509 	u32 queue = skb_get_queue_mapping(skb);
4510 	int nfrags = skb_shinfo(skb)->nr_frags;
4511 	int gso = skb_shinfo(skb)->gso_type;
4512 	struct stmmac_txq_stats *txq_stats;
4513 	struct dma_edesc *tbs_desc = NULL;
4514 	struct dma_desc *desc, *first;
4515 	struct stmmac_tx_queue *tx_q;
4516 	bool has_vlan, set_ic;
4517 	int entry, first_tx;
4518 	dma_addr_t des;
4519 
4520 	tx_q = &priv->dma_conf.tx_queue[queue];
4521 	txq_stats = &priv->xstats.txq_stats[queue];
4522 	first_tx = tx_q->cur_tx;
4523 
4524 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4525 		stmmac_disable_eee_mode(priv);
4526 
4527 	/* Manage oversized TCP frames for GMAC4 device */
4528 	if (skb_is_gso(skb) && priv->tso) {
4529 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4530 			return stmmac_tso_xmit(skb, dev);
4531 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4532 			return stmmac_tso_xmit(skb, dev);
4533 	}
4534 
4535 	if (priv->est && priv->est->enable &&
4536 	    priv->est->max_sdu[queue] &&
4537 	    skb->len > priv->est->max_sdu[queue]) {
4538 		priv->xstats.max_sdu_txq_drop[queue]++;
4539 		goto max_sdu_err;
4540 	}
4541 
4542 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4543 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4544 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4545 								queue));
4546 			/* This is a hard error, log it. */
4547 			netdev_err(priv->dev,
4548 				   "%s: Tx Ring full when queue awake\n",
4549 				   __func__);
4550 		}
4551 		return NETDEV_TX_BUSY;
4552 	}
4553 
4554 	/* Check if VLAN can be inserted by HW */
4555 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4556 
4557 	entry = tx_q->cur_tx;
4558 	first_entry = entry;
4559 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4560 
4561 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4562 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4563 	 * queues. In that case, checksum offloading for those queues that don't
4564 	 * support tx coe needs to fall back to software checksum calculation.
4565 	 *
4566 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4567 	 * also have to be checksummed in software.
4568 	 */
4569 	if (csum_insertion &&
4570 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4571 	     !stmmac_has_ip_ethertype(skb))) {
4572 		if (unlikely(skb_checksum_help(skb)))
4573 			goto dma_map_err;
4574 		csum_insertion = !csum_insertion;
4575 	}
4576 
4577 	if (likely(priv->extend_desc))
4578 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4579 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4580 		desc = &tx_q->dma_entx[entry].basic;
4581 	else
4582 		desc = tx_q->dma_tx + entry;
4583 
4584 	first = desc;
4585 
4586 	if (has_vlan)
4587 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4588 
4589 	enh_desc = priv->plat->enh_desc;
4590 	/* To program the descriptors according to the size of the frame */
4591 	if (enh_desc)
4592 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4593 
4594 	if (unlikely(is_jumbo)) {
4595 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4596 		if (unlikely(entry < 0) && (entry != -EINVAL))
4597 			goto dma_map_err;
4598 	}
4599 
4600 	for (i = 0; i < nfrags; i++) {
4601 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4602 		int len = skb_frag_size(frag);
4603 		bool last_segment = (i == (nfrags - 1));
4604 
4605 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4606 		WARN_ON(tx_q->tx_skbuff[entry]);
4607 
4608 		if (likely(priv->extend_desc))
4609 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4610 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4611 			desc = &tx_q->dma_entx[entry].basic;
4612 		else
4613 			desc = tx_q->dma_tx + entry;
4614 
4615 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4616 				       DMA_TO_DEVICE);
4617 		if (dma_mapping_error(priv->device, des))
4618 			goto dma_map_err; /* should reuse desc w/o issues */
4619 
4620 		tx_q->tx_skbuff_dma[entry].buf = des;
4621 
4622 		stmmac_set_desc_addr(priv, desc, des);
4623 
4624 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4625 		tx_q->tx_skbuff_dma[entry].len = len;
4626 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4627 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4628 
4629 		/* Prepare the descriptor and set the own bit too */
4630 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4631 				priv->mode, 1, last_segment, skb->len);
4632 	}
4633 
4634 	/* Only the last descriptor gets to point to the skb. */
4635 	tx_q->tx_skbuff[entry] = skb;
4636 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4637 
4638 	/* According to the coalesce parameter the IC bit for the latest
4639 	 * segment is reset and the timer re-started to clean the tx status.
4640 	 * This approach takes care of the fragments: desc is the first
4641 	 * element in case of no SG.
4642 	 */
4643 	tx_packets = (entry + 1) - first_tx;
4644 	tx_q->tx_count_frames += tx_packets;
4645 
4646 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4647 		set_ic = true;
4648 	else if (!priv->tx_coal_frames[queue])
4649 		set_ic = false;
4650 	else if (tx_packets > priv->tx_coal_frames[queue])
4651 		set_ic = true;
4652 	else if ((tx_q->tx_count_frames %
4653 		  priv->tx_coal_frames[queue]) < tx_packets)
4654 		set_ic = true;
4655 	else
4656 		set_ic = false;
4657 
4658 	if (set_ic) {
4659 		if (likely(priv->extend_desc))
4660 			desc = &tx_q->dma_etx[entry].basic;
4661 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4662 			desc = &tx_q->dma_entx[entry].basic;
4663 		else
4664 			desc = &tx_q->dma_tx[entry];
4665 
4666 		tx_q->tx_count_frames = 0;
4667 		stmmac_set_tx_ic(priv, desc);
4668 	}
4669 
4670 	/* We've used all descriptors we need for this skb, however,
4671 	 * advance cur_tx so that it references a fresh descriptor.
4672 	 * ndo_start_xmit will fill this descriptor the next time it's
4673 	 * called and stmmac_tx_clean may clean up to this descriptor.
4674 	 */
4675 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4676 	tx_q->cur_tx = entry;
4677 
4678 	if (netif_msg_pktdata(priv)) {
4679 		netdev_dbg(priv->dev,
4680 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4681 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4682 			   entry, first, nfrags);
4683 
4684 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4685 		print_pkt(skb->data, skb->len);
4686 	}
4687 
4688 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4689 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4690 			  __func__);
4691 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4692 	}
4693 
4694 	u64_stats_update_begin(&txq_stats->q_syncp);
4695 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4696 	if (set_ic)
4697 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4698 	u64_stats_update_end(&txq_stats->q_syncp);
4699 
4700 	if (priv->sarc_type)
4701 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4702 
4703 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4704 	 * problems because all the descriptors are actually ready to be
4705 	 * passed to the DMA engine.
4706 	 */
4707 	if (likely(!is_jumbo)) {
4708 		bool last_segment = (nfrags == 0);
4709 
4710 		des = dma_map_single(priv->device, skb->data,
4711 				     nopaged_len, DMA_TO_DEVICE);
4712 		if (dma_mapping_error(priv->device, des))
4713 			goto dma_map_err;
4714 
4715 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4716 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4717 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4718 
4719 		stmmac_set_desc_addr(priv, first, des);
4720 
4721 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4722 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4723 
4724 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4725 			     priv->hwts_tx_en)) {
4726 			/* declare that device is doing timestamping */
4727 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4728 			stmmac_enable_tx_timestamp(priv, first);
4729 		}
4730 
4731 		/* Prepare the first descriptor setting the OWN bit too */
4732 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4733 				csum_insertion, priv->mode, 0, last_segment,
4734 				skb->len);
4735 	}
4736 
4737 	if (tx_q->tbs & STMMAC_TBS_EN) {
4738 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4739 
4740 		tbs_desc = &tx_q->dma_entx[first_entry];
4741 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4742 	}
4743 
4744 	stmmac_set_tx_owner(priv, first);
4745 
4746 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4747 
4748 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4749 	skb_tx_timestamp(skb);
4750 	stmmac_flush_tx_descriptors(priv, queue);
4751 	stmmac_tx_timer_arm(priv, queue);
4752 
4753 	return NETDEV_TX_OK;
4754 
4755 dma_map_err:
4756 	netdev_err(priv->dev, "Tx DMA map failed\n");
4757 max_sdu_err:
4758 	dev_kfree_skb(skb);
4759 	priv->xstats.tx_dropped++;
4760 	return NETDEV_TX_OK;
4761 }
4762 
4763 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4764 {
4765 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4766 	__be16 vlan_proto = veth->h_vlan_proto;
4767 	u16 vlanid;
4768 
4769 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4770 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4771 	    (vlan_proto == htons(ETH_P_8021AD) &&
4772 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4773 		/* pop the vlan tag */
4774 		vlanid = ntohs(veth->h_vlan_TCI);
4775 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4776 		skb_pull(skb, VLAN_HLEN);
4777 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4778 	}
4779 }
4780 
4781 /**
4782  * stmmac_rx_refill - refill the used RX buffers
4783  * @priv: driver private structure
4784  * @queue: RX queue index
4785  * Description : this refills the page_pool backed RX buffers consumed by
4786  * the zero-copy reception process.
4787  */
4788 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4789 {
4790 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4791 	int dirty = stmmac_rx_dirty(priv, queue);
4792 	unsigned int entry = rx_q->dirty_rx;
4793 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4794 
4795 	if (priv->dma_cap.host_dma_width <= 32)
4796 		gfp |= GFP_DMA32;
4797 
4798 	while (dirty-- > 0) {
4799 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4800 		struct dma_desc *p;
4801 		bool use_rx_wd;
4802 
4803 		if (priv->extend_desc)
4804 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4805 		else
4806 			p = rx_q->dma_rx + entry;
4807 
4808 		if (!buf->page) {
4809 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4810 			if (!buf->page)
4811 				break;
4812 		}
4813 
4814 		if (priv->sph && !buf->sec_page) {
4815 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4816 			if (!buf->sec_page)
4817 				break;
4818 
4819 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4820 		}
4821 
4822 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4823 
4824 		stmmac_set_desc_addr(priv, p, buf->addr);
4825 		if (priv->sph)
4826 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4827 		else
4828 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4829 		stmmac_refill_desc3(priv, rx_q, p);
4830 
4831 		rx_q->rx_count_frames++;
4832 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4833 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4834 			rx_q->rx_count_frames = 0;
4835 
4836 		use_rx_wd = !priv->rx_coal_frames[queue];
4837 		use_rx_wd |= rx_q->rx_count_frames > 0;
4838 		if (!priv->use_riwt)
4839 			use_rx_wd = false;
4840 
4841 		dma_wmb();
4842 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4843 
4844 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4845 	}
4846 	rx_q->dirty_rx = entry;
4847 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4848 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4849 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4850 }
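
/*
 * Illustrative sketch (not part of the driver): the page_pool based refill
 * of a single RX slot performed above, with the split-header (sph) second
 * buffer and the RX watchdog handling left out.
 * "stmmac_sketch_refill_one" is a hypothetical helper name.
 */
static int stmmac_sketch_refill_one(struct stmmac_rx_queue *rx_q,
				    struct stmmac_rx_buffer *buf, gfp_t gfp)
{
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
	}

	/* Pages handed out by the pool are already DMA mapped. */
	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
	return 0;
}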
4851 
4852 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4853 				       struct dma_desc *p,
4854 				       int status, unsigned int len)
4855 {
4856 	unsigned int plen = 0, hlen = 0;
4857 	int coe = priv->hw->rx_csum;
4858 
4859 	/* Not first descriptor, buffer is always zero */
4860 	if (priv->sph && len)
4861 		return 0;
4862 
4863 	/* First descriptor, get split header length */
4864 	stmmac_get_rx_header_len(priv, p, &hlen);
4865 	if (priv->sph && hlen) {
4866 		priv->xstats.rx_split_hdr_pkt_n++;
4867 		return hlen;
4868 	}
4869 
4870 	/* First descriptor, not last descriptor and not split header */
4871 	if (status & rx_not_ls)
4872 		return priv->dma_conf.dma_buf_sz;
4873 
4874 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4875 
4876 	/* First descriptor and last descriptor and not split header */
4877 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4878 }
4879 
4880 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4881 				       struct dma_desc *p,
4882 				       int status, unsigned int len)
4883 {
4884 	int coe = priv->hw->rx_csum;
4885 	unsigned int plen = 0;
4886 
4887 	/* Not split header, buffer is not available */
4888 	if (!priv->sph)
4889 		return 0;
4890 
4891 	/* Not last descriptor */
4892 	if (status & rx_not_ls)
4893 		return priv->dma_conf.dma_buf_sz;
4894 
4895 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4896 
4897 	/* Last descriptor */
4898 	return plen - len;
4899 }
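
/*
 * Illustrative sketch (not part of the driver): how the two helpers above
 * are combined in the RX path. The accumulated length seen so far is fed
 * back into each call, so buffer 1 (header or whole frame) and buffer 2
 * (split payload) are accounted exactly once per descriptor.
 * "stmmac_sketch_rx_desc_len" is a hypothetical helper name.
 */
static unsigned int stmmac_sketch_rx_desc_len(struct stmmac_priv *priv,
					      struct dma_desc *p, int status,
					      unsigned int len)
{
	unsigned int buf1_len, buf2_len;

	buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
	len += buf1_len;
	buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
	len += buf2_len;

	return len;
}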
4900 
4901 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4902 				struct xdp_frame *xdpf, bool dma_map)
4903 {
4904 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4905 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4906 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4907 	unsigned int entry = tx_q->cur_tx;
4908 	struct dma_desc *tx_desc;
4909 	dma_addr_t dma_addr;
4910 	bool set_ic;
4911 
4912 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4913 		return STMMAC_XDP_CONSUMED;
4914 
4915 	if (priv->est && priv->est->enable &&
4916 	    priv->est->max_sdu[queue] &&
4917 	    xdpf->len > priv->est->max_sdu[queue]) {
4918 		priv->xstats.max_sdu_txq_drop[queue]++;
4919 		return STMMAC_XDP_CONSUMED;
4920 	}
4921 
4922 	if (likely(priv->extend_desc))
4923 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4924 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4925 		tx_desc = &tx_q->dma_entx[entry].basic;
4926 	else
4927 		tx_desc = tx_q->dma_tx + entry;
4928 
4929 	if (dma_map) {
4930 		dma_addr = dma_map_single(priv->device, xdpf->data,
4931 					  xdpf->len, DMA_TO_DEVICE);
4932 		if (dma_mapping_error(priv->device, dma_addr))
4933 			return STMMAC_XDP_CONSUMED;
4934 
4935 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4936 	} else {
4937 		struct page *page = virt_to_page(xdpf->data);
4938 
4939 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4940 			   xdpf->headroom;
4941 		dma_sync_single_for_device(priv->device, dma_addr,
4942 					   xdpf->len, DMA_BIDIRECTIONAL);
4943 
4944 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4945 	}
4946 
4947 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4948 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4949 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4950 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4951 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4952 
4953 	tx_q->xdpf[entry] = xdpf;
4954 
4955 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4956 
4957 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4958 			       csum, priv->mode, true, true,
4959 			       xdpf->len);
4960 
4961 	tx_q->tx_count_frames++;
4962 
4963 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4964 		set_ic = true;
4965 	else
4966 		set_ic = false;
4967 
4968 	if (set_ic) {
4969 		tx_q->tx_count_frames = 0;
4970 		stmmac_set_tx_ic(priv, tx_desc);
4971 		u64_stats_update_begin(&txq_stats->q_syncp);
4972 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4973 		u64_stats_update_end(&txq_stats->q_syncp);
4974 	}
4975 
4976 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4977 
4978 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4979 	tx_q->cur_tx = entry;
4980 
4981 	return STMMAC_XDP_TX;
4982 }
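/* Descriptor setup note for the XDP TX path above: frames coming from
 * ndo_xdp_xmit (dma_map == true) are freshly DMA-mapped and tracked as
 * STMMAC_TXBUF_T_XDP_NDO, while XDP_TX frames reuse the page_pool DMA
 * address of the RX page (offset by the xdp_frame struct and its headroom)
 * and are tracked as STMMAC_TXBUF_T_XDP_TX so the TX clean path can release
 * them appropriately.
 */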
4983 
4984 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4985 				   int cpu)
4986 {
4987 	int index = cpu;
4988 
4989 	if (unlikely(index < 0))
4990 		index = 0;
4991 
4992 	while (index >= priv->plat->tx_queues_to_use)
4993 		index -= priv->plat->tx_queues_to_use;
4994 
4995 	return index;
4996 }
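/* Illustration, assuming four TX queues (tx_queues_to_use == 4): CPUs 0..3
 * map to queues 0..3 and CPU 5 maps to queue 1, i.e. the loop above behaves
 * like "index = cpu % tx_queues_to_use" for non-negative cpu values.
 */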
4997 
4998 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4999 				struct xdp_buff *xdp)
5000 {
5001 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5002 	int cpu = smp_processor_id();
5003 	struct netdev_queue *nq;
5004 	int queue;
5005 	int res;
5006 
5007 	if (unlikely(!xdpf))
5008 		return STMMAC_XDP_CONSUMED;
5009 
5010 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5011 	nq = netdev_get_tx_queue(priv->dev, queue);
5012 
5013 	__netif_tx_lock(nq, cpu);
5014 	/* Avoids TX time-out as we are sharing with slow path */
5015 	txq_trans_cond_update(nq);
5016 
5017 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5018 	if (res == STMMAC_XDP_TX)
5019 		stmmac_flush_tx_descriptors(priv, queue);
5020 
5021 	__netif_tx_unlock(nq);
5022 
5023 	return res;
5024 }
5025 
5026 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5027 				 struct bpf_prog *prog,
5028 				 struct xdp_buff *xdp)
5029 {
5030 	u32 act;
5031 	int res;
5032 
5033 	act = bpf_prog_run_xdp(prog, xdp);
5034 	switch (act) {
5035 	case XDP_PASS:
5036 		res = STMMAC_XDP_PASS;
5037 		break;
5038 	case XDP_TX:
5039 		res = stmmac_xdp_xmit_back(priv, xdp);
5040 		break;
5041 	case XDP_REDIRECT:
5042 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5043 			res = STMMAC_XDP_CONSUMED;
5044 		else
5045 			res = STMMAC_XDP_REDIRECT;
5046 		break;
5047 	default:
5048 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5049 		fallthrough;
5050 	case XDP_ABORTED:
5051 		trace_xdp_exception(priv->dev, prog, act);
5052 		fallthrough;
5053 	case XDP_DROP:
5054 		res = STMMAC_XDP_CONSUMED;
5055 		break;
5056 	}
5057 
5058 	return res;
5059 }
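/* Summary of the verdict mapping implemented above:
 *   XDP_PASS                         -> STMMAC_XDP_PASS (frame goes to the stack)
 *   XDP_TX                           -> STMMAC_XDP_TX (reflected out of this port)
 *   XDP_REDIRECT                     -> STMMAC_XDP_REDIRECT (flushed later in
 *                                       stmmac_finalize_xdp_rx())
 *   XDP_ABORTED / XDP_DROP / unknown -> STMMAC_XDP_CONSUMED (buffer recycled)
 */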
5060 
5061 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5062 					   struct xdp_buff *xdp)
5063 {
5064 	struct bpf_prog *prog;
5065 	int res;
5066 
5067 	prog = READ_ONCE(priv->xdp_prog);
5068 	if (!prog) {
5069 		res = STMMAC_XDP_PASS;
5070 		goto out;
5071 	}
5072 
5073 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5074 out:
5075 	return ERR_PTR(-res);
5076 }
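/* Note on the return encoding: the verdict is folded into an ERR_PTR so the
 * RX path can keep a single skb variable. STMMAC_XDP_PASS is 0, so it encodes
 * to NULL and the later "if (!skb)" branch builds a real skb; any other
 * verdict becomes an error pointer that stmmac_rx() decodes with
 * "xdp_res = -PTR_ERR(skb)".
 */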
5077 
5078 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5079 				   int xdp_status)
5080 {
5081 	int cpu = smp_processor_id();
5082 	int queue;
5083 
5084 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5085 
5086 	if (xdp_status & STMMAC_XDP_TX)
5087 		stmmac_tx_timer_arm(priv, queue);
5088 
5089 	if (xdp_status & STMMAC_XDP_REDIRECT)
5090 		xdp_do_flush();
5091 }
5092 
5093 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5094 					       struct xdp_buff *xdp)
5095 {
5096 	unsigned int metasize = xdp->data - xdp->data_meta;
5097 	unsigned int datasize = xdp->data_end - xdp->data;
5098 	struct sk_buff *skb;
5099 
5100 	skb = napi_alloc_skb(&ch->rxtx_napi,
5101 			     xdp->data_end - xdp->data_hard_start);
5102 	if (unlikely(!skb))
5103 		return NULL;
5104 
5105 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5106 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5107 	if (metasize)
5108 		skb_metadata_set(skb, metasize);
5109 
5110 	return skb;
5111 }
5112 
5113 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5114 				   struct dma_desc *p, struct dma_desc *np,
5115 				   struct xdp_buff *xdp)
5116 {
5117 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5118 	struct stmmac_channel *ch = &priv->channel[queue];
5119 	unsigned int len = xdp->data_end - xdp->data;
5120 	enum pkt_hash_types hash_type;
5121 	int coe = priv->hw->rx_csum;
5122 	struct sk_buff *skb;
5123 	u32 hash;
5124 
5125 	skb = stmmac_construct_skb_zc(ch, xdp);
5126 	if (!skb) {
5127 		priv->xstats.rx_dropped++;
5128 		return;
5129 	}
5130 
5131 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5132 	if (priv->hw->hw_vlan_en)
5133 		/* MAC level stripping. */
5134 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5135 	else
5136 		/* Driver level stripping. */
5137 		stmmac_rx_vlan(priv->dev, skb);
5138 	skb->protocol = eth_type_trans(skb, priv->dev);
5139 
5140 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5141 		skb_checksum_none_assert(skb);
5142 	else
5143 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5144 
5145 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5146 		skb_set_hash(skb, hash, hash_type);
5147 
5148 	skb_record_rx_queue(skb, queue);
5149 	napi_gro_receive(&ch->rxtx_napi, skb);
5150 
5151 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5152 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5153 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5154 	u64_stats_update_end(&rxq_stats->napi_syncp);
5155 }
5156 
5157 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5158 {
5159 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5160 	unsigned int entry = rx_q->dirty_rx;
5161 	struct dma_desc *rx_desc = NULL;
5162 	bool ret = true;
5163 
5164 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5165 
5166 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5167 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5168 		dma_addr_t dma_addr;
5169 		bool use_rx_wd;
5170 
5171 		if (!buf->xdp) {
5172 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5173 			if (!buf->xdp) {
5174 				ret = false;
5175 				break;
5176 			}
5177 		}
5178 
5179 		if (priv->extend_desc)
5180 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5181 		else
5182 			rx_desc = rx_q->dma_rx + entry;
5183 
5184 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5185 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5186 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5187 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5188 
5189 		rx_q->rx_count_frames++;
5190 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5191 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5192 			rx_q->rx_count_frames = 0;
5193 
5194 		use_rx_wd = !priv->rx_coal_frames[queue];
5195 		use_rx_wd |= rx_q->rx_count_frames > 0;
5196 		if (!priv->use_riwt)
5197 			use_rx_wd = false;
5198 
5199 		dma_wmb();
5200 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5201 
5202 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5203 	}
5204 
5205 	if (rx_desc) {
5206 		rx_q->dirty_rx = entry;
5207 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5208 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5209 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5210 	}
5211 
5212 	return ret;
5213 }
5214 
5215 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5216 {
5217 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5218 	 * represents the incoming packet, whereas the cb field in the same
5219 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5220 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5221 	 */
5222 	return (struct stmmac_xdp_buff *)xdp;
5223 }
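/* Sketch of the assumed layout (illustrative; the authoritative definitions
 * are struct stmmac_xdp_buff in stmmac.h and struct xdp_buff_xsk in the XSK
 * core headers):
 *
 *   struct xdp_buff_xsk              struct stmmac_xdp_buff
 *     struct xdp_buff xdp;    <-->     struct xdp_buff xdp;
 *     u8 cb[];                <-->     struct stmmac_priv *priv;
 *     ...                              struct dma_desc *desc, *ndesc;
 *
 * The cast is only valid while stmmac_xdp_buff starts with a struct xdp_buff
 * and its remaining fields fit inside the cb[] scratch space.
 */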
5224 
5225 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5226 {
5227 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5228 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5229 	unsigned int count = 0, error = 0, len = 0;
5230 	int dirty = stmmac_rx_dirty(priv, queue);
5231 	unsigned int next_entry = rx_q->cur_rx;
5232 	u32 rx_errors = 0, rx_dropped = 0;
5233 	unsigned int desc_size;
5234 	struct bpf_prog *prog;
5235 	bool failure = false;
5236 	int xdp_status = 0;
5237 	int status = 0;
5238 
5239 	if (netif_msg_rx_status(priv)) {
5240 		void *rx_head;
5241 
5242 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5243 		if (priv->extend_desc) {
5244 			rx_head = (void *)rx_q->dma_erx;
5245 			desc_size = sizeof(struct dma_extended_desc);
5246 		} else {
5247 			rx_head = (void *)rx_q->dma_rx;
5248 			desc_size = sizeof(struct dma_desc);
5249 		}
5250 
5251 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5252 				    rx_q->dma_rx_phy, desc_size);
5253 	}
5254 	while (count < limit) {
5255 		struct stmmac_rx_buffer *buf;
5256 		struct stmmac_xdp_buff *ctx;
5257 		unsigned int buf1_len = 0;
5258 		struct dma_desc *np, *p;
5259 		int entry;
5260 		int res;
5261 
5262 		if (!count && rx_q->state_saved) {
5263 			error = rx_q->state.error;
5264 			len = rx_q->state.len;
5265 		} else {
5266 			rx_q->state_saved = false;
5267 			error = 0;
5268 			len = 0;
5269 		}
5270 
5271 		if (count >= limit)
5272 			break;
5273 
5274 read_again:
5275 		buf1_len = 0;
5276 		entry = next_entry;
5277 		buf = &rx_q->buf_pool[entry];
5278 
5279 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5280 			failure = failure ||
5281 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5282 			dirty = 0;
5283 		}
5284 
5285 		if (priv->extend_desc)
5286 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5287 		else
5288 			p = rx_q->dma_rx + entry;
5289 
5290 		/* read the status of the incoming frame */
5291 		status = stmmac_rx_status(priv, &priv->xstats, p);
5292 		/* check if managed by the DMA otherwise go ahead */
5293 		if (unlikely(status & dma_own))
5294 			break;
5295 
5296 		/* Prefetch the next RX descriptor */
5297 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5298 						priv->dma_conf.dma_rx_size);
5299 		next_entry = rx_q->cur_rx;
5300 
5301 		if (priv->extend_desc)
5302 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5303 		else
5304 			np = rx_q->dma_rx + next_entry;
5305 
5306 		prefetch(np);
5307 
5308 		/* Ensure a valid XSK buffer before proceeding */
5309 		if (!buf->xdp)
5310 			break;
5311 
5312 		if (priv->extend_desc)
5313 			stmmac_rx_extended_status(priv, &priv->xstats,
5314 						  rx_q->dma_erx + entry);
5315 		if (unlikely(status == discard_frame)) {
5316 			xsk_buff_free(buf->xdp);
5317 			buf->xdp = NULL;
5318 			dirty++;
5319 			error = 1;
5320 			if (!priv->hwts_rx_en)
5321 				rx_errors++;
5322 		}
5323 
5324 		if (unlikely(error && (status & rx_not_ls)))
5325 			goto read_again;
5326 		if (unlikely(error)) {
5327 			count++;
5328 			continue;
5329 		}
5330 
5331 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5332 		if (likely(status & rx_not_ls)) {
5333 			xsk_buff_free(buf->xdp);
5334 			buf->xdp = NULL;
5335 			dirty++;
5336 			count++;
5337 			goto read_again;
5338 		}
5339 
5340 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5341 		ctx->priv = priv;
5342 		ctx->desc = p;
5343 		ctx->ndesc = np;
5344 
5345 		/* XDP ZC frames only support primary buffers for now */
5346 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5347 		len += buf1_len;
5348 
5349 		/* ACS is disabled; strip manually. */
5350 		if (likely(!(status & rx_not_ls))) {
5351 			buf1_len -= ETH_FCS_LEN;
5352 			len -= ETH_FCS_LEN;
5353 		}
5354 
5355 		/* RX buffer is good and fits into an XSK pool buffer */
5356 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5357 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5358 
5359 		prog = READ_ONCE(priv->xdp_prog);
5360 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5361 
5362 		switch (res) {
5363 		case STMMAC_XDP_PASS:
5364 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5365 			xsk_buff_free(buf->xdp);
5366 			break;
5367 		case STMMAC_XDP_CONSUMED:
5368 			xsk_buff_free(buf->xdp);
5369 			rx_dropped++;
5370 			break;
5371 		case STMMAC_XDP_TX:
5372 		case STMMAC_XDP_REDIRECT:
5373 			xdp_status |= res;
5374 			break;
5375 		}
5376 
5377 		buf->xdp = NULL;
5378 		dirty++;
5379 		count++;
5380 	}
5381 
5382 	if (status & rx_not_ls) {
5383 		rx_q->state_saved = true;
5384 		rx_q->state.error = error;
5385 		rx_q->state.len = len;
5386 	}
5387 
5388 	stmmac_finalize_xdp_rx(priv, xdp_status);
5389 
5390 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5391 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5392 	u64_stats_update_end(&rxq_stats->napi_syncp);
5393 
5394 	priv->xstats.rx_dropped += rx_dropped;
5395 	priv->xstats.rx_errors += rx_errors;
5396 
5397 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5398 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5399 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5400 		else
5401 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5402 
5403 		return (int)count;
5404 	}
5405 
5406 	return failure ? limit : (int)count;
5407 }
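/* Return convention: when the XSK pool runs with need_wakeup, the RX
 * need_wakeup flag is set whenever the refill failed or dirty descriptors
 * remain, so user space knows it must kick the RX path; without need_wakeup,
 * returning the full limit on a refill failure keeps NAPI polling.
 */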
5408 
5409 /**
5410  * stmmac_rx - manage the receive process
5411  * @priv: driver private structure
5412  * @limit: napi budget
5413  * @queue: RX queue index.
5414  * Description: this is the function called by the NAPI poll method.
5415  * It gets all the frames inside the ring.
5416  */
5417 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5418 {
5419 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5420 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5421 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5422 	struct stmmac_channel *ch = &priv->channel[queue];
5423 	unsigned int count = 0, error = 0, len = 0;
5424 	int status = 0, coe = priv->hw->rx_csum;
5425 	unsigned int next_entry = rx_q->cur_rx;
5426 	enum dma_data_direction dma_dir;
5427 	unsigned int desc_size;
5428 	struct sk_buff *skb = NULL;
5429 	struct stmmac_xdp_buff ctx;
5430 	int xdp_status = 0;
5431 	int buf_sz;
5432 
5433 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5434 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5435 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5436 
5437 	if (netif_msg_rx_status(priv)) {
5438 		void *rx_head;
5439 
5440 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5441 		if (priv->extend_desc) {
5442 			rx_head = (void *)rx_q->dma_erx;
5443 			desc_size = sizeof(struct dma_extended_desc);
5444 		} else {
5445 			rx_head = (void *)rx_q->dma_rx;
5446 			desc_size = sizeof(struct dma_desc);
5447 		}
5448 
5449 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5450 				    rx_q->dma_rx_phy, desc_size);
5451 	}
5452 	while (count < limit) {
5453 		unsigned int buf1_len = 0, buf2_len = 0;
5454 		enum pkt_hash_types hash_type;
5455 		struct stmmac_rx_buffer *buf;
5456 		struct dma_desc *np, *p;
5457 		int entry;
5458 		u32 hash;
5459 
5460 		if (!count && rx_q->state_saved) {
5461 			skb = rx_q->state.skb;
5462 			error = rx_q->state.error;
5463 			len = rx_q->state.len;
5464 		} else {
5465 			rx_q->state_saved = false;
5466 			skb = NULL;
5467 			error = 0;
5468 			len = 0;
5469 		}
5470 
5471 read_again:
5472 		if (count >= limit)
5473 			break;
5474 
5475 		buf1_len = 0;
5476 		buf2_len = 0;
5477 		entry = next_entry;
5478 		buf = &rx_q->buf_pool[entry];
5479 
5480 		if (priv->extend_desc)
5481 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5482 		else
5483 			p = rx_q->dma_rx + entry;
5484 
5485 		/* read the status of the incoming frame */
5486 		status = stmmac_rx_status(priv, &priv->xstats, p);
5487 		/* check if managed by the DMA otherwise go ahead */
5488 		if (unlikely(status & dma_own))
5489 			break;
5490 
5491 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5492 						priv->dma_conf.dma_rx_size);
5493 		next_entry = rx_q->cur_rx;
5494 
5495 		if (priv->extend_desc)
5496 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5497 		else
5498 			np = rx_q->dma_rx + next_entry;
5499 
5500 		prefetch(np);
5501 
5502 		if (priv->extend_desc)
5503 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5504 		if (unlikely(status == discard_frame)) {
5505 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5506 			buf->page = NULL;
5507 			error = 1;
5508 			if (!priv->hwts_rx_en)
5509 				rx_errors++;
5510 		}
5511 
5512 		if (unlikely(error && (status & rx_not_ls)))
5513 			goto read_again;
5514 		if (unlikely(error)) {
5515 			dev_kfree_skb(skb);
5516 			skb = NULL;
5517 			count++;
5518 			continue;
5519 		}
5520 
5521 		/* Buffer is good. Go on. */
5522 
5523 		prefetch(page_address(buf->page) + buf->page_offset);
5524 		if (buf->sec_page)
5525 			prefetch(page_address(buf->sec_page));
5526 
5527 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5528 		len += buf1_len;
5529 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5530 		len += buf2_len;
5531 
5532 		/* ACS is disabled; strip manually. */
5533 		if (likely(!(status & rx_not_ls))) {
5534 			if (buf2_len) {
5535 				buf2_len -= ETH_FCS_LEN;
5536 				len -= ETH_FCS_LEN;
5537 			} else if (buf1_len) {
5538 				buf1_len -= ETH_FCS_LEN;
5539 				len -= ETH_FCS_LEN;
5540 			}
5541 		}
5542 
5543 		if (!skb) {
5544 			unsigned int pre_len, sync_len;
5545 
5546 			dma_sync_single_for_cpu(priv->device, buf->addr,
5547 						buf1_len, dma_dir);
5548 
5549 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5550 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5551 					 buf->page_offset, buf1_len, true);
5552 
5553 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5554 				  buf->page_offset;
5555 
5556 			ctx.priv = priv;
5557 			ctx.desc = p;
5558 			ctx.ndesc = np;
5559 
5560 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5561 			/* Due to xdp_adjust_tail: the DMA sync for_device
5562 			 * must cover the max length the CPU touched.
5563 			 */
5564 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5565 				   buf->page_offset;
5566 			sync_len = max(sync_len, pre_len);
5567 
5568 			/* For non-XDP_PASS verdicts */
5569 			if (IS_ERR(skb)) {
5570 				unsigned int xdp_res = -PTR_ERR(skb);
5571 
5572 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5573 					page_pool_put_page(rx_q->page_pool,
5574 							   virt_to_head_page(ctx.xdp.data),
5575 							   sync_len, true);
5576 					buf->page = NULL;
5577 					rx_dropped++;
5578 
5579 					/* Clear skb, as it holds the XDP
5580 					 * verdict status, not a real buffer.
5581 					 */
5582 					skb = NULL;
5583 
5584 					if (unlikely((status & rx_not_ls)))
5585 						goto read_again;
5586 
5587 					count++;
5588 					continue;
5589 				} else if (xdp_res & (STMMAC_XDP_TX |
5590 						      STMMAC_XDP_REDIRECT)) {
5591 					xdp_status |= xdp_res;
5592 					buf->page = NULL;
5593 					skb = NULL;
5594 					count++;
5595 					continue;
5596 				}
5597 			}
5598 		}
5599 
5600 		if (!skb) {
5601 			/* XDP program may expand or reduce tail */
5602 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5603 
5604 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5605 			if (!skb) {
5606 				rx_dropped++;
5607 				count++;
5608 				goto drain_data;
5609 			}
5610 
5611 			/* XDP program may adjust header */
5612 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5613 			skb_put(skb, buf1_len);
5614 
5615 			/* Data payload copied into SKB, page ready for recycle */
5616 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5617 			buf->page = NULL;
5618 		} else if (buf1_len) {
5619 			dma_sync_single_for_cpu(priv->device, buf->addr,
5620 						buf1_len, dma_dir);
5621 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5622 					buf->page, buf->page_offset, buf1_len,
5623 					priv->dma_conf.dma_buf_sz);
5624 
5625 			/* Data payload appended into SKB */
5626 			skb_mark_for_recycle(skb);
5627 			buf->page = NULL;
5628 		}
5629 
5630 		if (buf2_len) {
5631 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5632 						buf2_len, dma_dir);
5633 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5634 					buf->sec_page, 0, buf2_len,
5635 					priv->dma_conf.dma_buf_sz);
5636 
5637 			/* Data payload appended into SKB */
5638 			skb_mark_for_recycle(skb);
5639 			buf->sec_page = NULL;
5640 		}
5641 
5642 drain_data:
5643 		if (likely(status & rx_not_ls))
5644 			goto read_again;
5645 		if (!skb)
5646 			continue;
5647 
5648 		/* Got entire packet into SKB. Finish it. */
5649 
5650 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5651 
5652 		if (priv->hw->hw_vlan_en)
5653 			/* MAC level stripping. */
5654 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5655 		else
5656 			/* Driver level stripping. */
5657 			stmmac_rx_vlan(priv->dev, skb);
5658 
5659 		skb->protocol = eth_type_trans(skb, priv->dev);
5660 
5661 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5662 			skb_checksum_none_assert(skb);
5663 		else
5664 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5665 
5666 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5667 			skb_set_hash(skb, hash, hash_type);
5668 
5669 		skb_record_rx_queue(skb, queue);
5670 		napi_gro_receive(&ch->rx_napi, skb);
5671 		skb = NULL;
5672 
5673 		rx_packets++;
5674 		rx_bytes += len;
5675 		count++;
5676 	}
5677 
5678 	if (status & rx_not_ls || skb) {
5679 		rx_q->state_saved = true;
5680 		rx_q->state.skb = skb;
5681 		rx_q->state.error = error;
5682 		rx_q->state.len = len;
5683 	}
5684 
5685 	stmmac_finalize_xdp_rx(priv, xdp_status);
5686 
5687 	stmmac_rx_refill(priv, queue);
5688 
5689 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5690 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5691 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5692 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5693 	u64_stats_update_end(&rxq_stats->napi_syncp);
5694 
5695 	priv->xstats.rx_dropped += rx_dropped;
5696 	priv->xstats.rx_errors += rx_errors;
5697 
5698 	return count;
5699 }
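/* Note on rx_q->state: when a frame spans several descriptors and the loop
 * stops before its last segment (budget exhausted or descriptor still owned
 * by the DMA), the partially built skb, the error flag and the accumulated
 * length are parked in rx_q->state and restored by the
 * "!count && rx_q->state_saved" test at the top of the next poll.
 */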
5700 
5701 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5702 {
5703 	struct stmmac_channel *ch =
5704 		container_of(napi, struct stmmac_channel, rx_napi);
5705 	struct stmmac_priv *priv = ch->priv_data;
5706 	struct stmmac_rxq_stats *rxq_stats;
5707 	u32 chan = ch->index;
5708 	int work_done;
5709 
5710 	rxq_stats = &priv->xstats.rxq_stats[chan];
5711 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5712 	u64_stats_inc(&rxq_stats->napi.poll);
5713 	u64_stats_update_end(&rxq_stats->napi_syncp);
5714 
5715 	work_done = stmmac_rx(priv, budget, chan);
5716 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5717 		unsigned long flags;
5718 
5719 		spin_lock_irqsave(&ch->lock, flags);
5720 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5721 		spin_unlock_irqrestore(&ch->lock, flags);
5722 	}
5723 
5724 	return work_done;
5725 }
5726 
5727 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5728 {
5729 	struct stmmac_channel *ch =
5730 		container_of(napi, struct stmmac_channel, tx_napi);
5731 	struct stmmac_priv *priv = ch->priv_data;
5732 	struct stmmac_txq_stats *txq_stats;
5733 	bool pending_packets = false;
5734 	u32 chan = ch->index;
5735 	int work_done;
5736 
5737 	txq_stats = &priv->xstats.txq_stats[chan];
5738 	u64_stats_update_begin(&txq_stats->napi_syncp);
5739 	u64_stats_inc(&txq_stats->napi.poll);
5740 	u64_stats_update_end(&txq_stats->napi_syncp);
5741 
5742 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5743 	work_done = min(work_done, budget);
5744 
5745 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5746 		unsigned long flags;
5747 
5748 		spin_lock_irqsave(&ch->lock, flags);
5749 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5750 		spin_unlock_irqrestore(&ch->lock, flags);
5751 	}
5752 
5753 	/* TX still has packets to handle, check if we need to arm the tx timer */
5754 	if (pending_packets)
5755 		stmmac_tx_timer_arm(priv, chan);
5756 
5757 	return work_done;
5758 }
5759 
5760 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5761 {
5762 	struct stmmac_channel *ch =
5763 		container_of(napi, struct stmmac_channel, rxtx_napi);
5764 	struct stmmac_priv *priv = ch->priv_data;
5765 	bool tx_pending_packets = false;
5766 	int rx_done, tx_done, rxtx_done;
5767 	struct stmmac_rxq_stats *rxq_stats;
5768 	struct stmmac_txq_stats *txq_stats;
5769 	u32 chan = ch->index;
5770 
5771 	rxq_stats = &priv->xstats.rxq_stats[chan];
5772 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5773 	u64_stats_inc(&rxq_stats->napi.poll);
5774 	u64_stats_update_end(&rxq_stats->napi_syncp);
5775 
5776 	txq_stats = &priv->xstats.txq_stats[chan];
5777 	u64_stats_update_begin(&txq_stats->napi_syncp);
5778 	u64_stats_inc(&txq_stats->napi.poll);
5779 	u64_stats_update_end(&txq_stats->napi_syncp);
5780 
5781 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5782 	tx_done = min(tx_done, budget);
5783 
5784 	rx_done = stmmac_rx_zc(priv, budget, chan);
5785 
5786 	rxtx_done = max(tx_done, rx_done);
5787 
5788 	/* If either TX or RX work is not complete, return budget
5789 	 * and keep polling
5790 	 */
5791 	if (rxtx_done >= budget)
5792 		return budget;
5793 
5794 	/* all work done, exit the polling mode */
5795 	if (napi_complete_done(napi, rxtx_done)) {
5796 		unsigned long flags;
5797 
5798 		spin_lock_irqsave(&ch->lock, flags);
5799 		/* Both RX and TX work are complete,
5800 		 * so enable both RX & TX IRQs.
5801 		 */
5802 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5803 		spin_unlock_irqrestore(&ch->lock, flags);
5804 	}
5805 
5806 	/* TX still has packets to handle, check if we need to arm the tx timer */
5807 	if (tx_pending_packets)
5808 		stmmac_tx_timer_arm(priv, chan);
5809 
5810 	return min(rxtx_done, budget - 1);
5811 }
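/* By this point both RX and TX completed below the budget; clamping the
 * return value to budget - 1 keeps it strictly below the budget, which is
 * what signals "work complete" to the NAPI core.
 */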
5812 
5813 /**
5814  *  stmmac_tx_timeout
5815  *  @dev : Pointer to net device structure
5816  *  @txqueue: the index of the hanging transmit queue
5817  *  Description: this function is called when a packet transmission fails to
5818  *   complete within a reasonable time. The driver will mark the error in the
5819  *   netdev structure and arrange for the device to be reset to a sane state
5820  *   in order to transmit a new packet.
5821  */
5822 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5823 {
5824 	struct stmmac_priv *priv = netdev_priv(dev);
5825 
5826 	stmmac_global_err(priv);
5827 }
5828 
5829 /**
5830  *  stmmac_set_rx_mode - entry point for multicast addressing
5831  *  @dev : pointer to the device structure
5832  *  Description:
5833  *  This function is a driver entry point which gets called by the kernel
5834  *  whenever multicast addresses must be enabled/disabled.
5835  *  Return value:
5836  *  void.
5837  */
5838 static void stmmac_set_rx_mode(struct net_device *dev)
5839 {
5840 	struct stmmac_priv *priv = netdev_priv(dev);
5841 
5842 	stmmac_set_filter(priv, priv->hw, dev);
5843 }
5844 
5845 /**
5846  *  stmmac_change_mtu - entry point to change MTU size for the device.
5847  *  @dev : device pointer.
5848  *  @new_mtu : the new MTU size for the device.
5849  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5850  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5851  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5852  *  Return value:
5853  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5854  *  file on failure.
5855  */
5856 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5857 {
5858 	struct stmmac_priv *priv = netdev_priv(dev);
5859 	int txfifosz = priv->plat->tx_fifo_size;
5860 	struct stmmac_dma_conf *dma_conf;
5861 	const int mtu = new_mtu;
5862 	int ret;
5863 
5864 	if (txfifosz == 0)
5865 		txfifosz = priv->dma_cap.tx_fifo_size;
5866 
5867 	txfifosz /= priv->plat->tx_queues_to_use;
5868 
5869 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5870 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5871 		return -EINVAL;
5872 	}
5873 
5874 	new_mtu = STMMAC_ALIGN(new_mtu);
5875 
5876 	/* If condition true, FIFO is too small or MTU too large */
5877 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5878 		return -EINVAL;
5879 
5880 	if (netif_running(dev)) {
5881 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5882 		/* Try to allocate the new DMA conf with the new mtu */
5883 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5884 		if (IS_ERR(dma_conf)) {
5885 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5886 				   mtu);
5887 			return PTR_ERR(dma_conf);
5888 		}
5889 
5890 		stmmac_release(dev);
5891 
5892 		ret = __stmmac_open(dev, dma_conf);
5893 		if (ret) {
5894 			free_dma_desc_resources(priv, dma_conf);
5895 			kfree(dma_conf);
5896 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5897 			return ret;
5898 		}
5899 
5900 		kfree(dma_conf);
5901 
5902 		stmmac_set_rx_mode(dev);
5903 	}
5904 
5905 	WRITE_ONCE(dev->mtu, mtu);
5906 	netdev_update_features(dev);
5907 
5908 	return 0;
5909 }
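/* On a running interface the new MTU takes effect by building a fresh DMA
 * descriptor configuration sized for it and re-opening the device with the
 * new rings; with an XDP program attached the MTU is capped at ETH_DATA_LEN.
 * Illustrative usage from user space (the interface name is a placeholder):
 *
 *   ip link set dev eth0 mtu 4000
 */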
5910 
5911 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5912 					     netdev_features_t features)
5913 {
5914 	struct stmmac_priv *priv = netdev_priv(dev);
5915 
5916 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5917 		features &= ~NETIF_F_RXCSUM;
5918 
5919 	if (!priv->plat->tx_coe)
5920 		features &= ~NETIF_F_CSUM_MASK;
5921 
5922 	/* Some GMAC devices have buggy Jumbo frame support that
5923 	 * requires the Tx COE to be disabled for oversized frames
5924 	 * (due to limited buffer sizes). In this case we disable
5925 	 * the TX csum insertion in the TDES and do not use SF.
5926 	 */
5927 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5928 		features &= ~NETIF_F_CSUM_MASK;
5929 
5930 	/* Disable tso if asked by ethtool */
5931 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5932 		if (features & NETIF_F_TSO)
5933 			priv->tso = true;
5934 		else
5935 			priv->tso = false;
5936 	}
5937 
5938 	return features;
5939 }
5940 
5941 static int stmmac_set_features(struct net_device *netdev,
5942 			       netdev_features_t features)
5943 {
5944 	struct stmmac_priv *priv = netdev_priv(netdev);
5945 
5946 	/* Keep the COE Type in case csum is supported */
5947 	if (features & NETIF_F_RXCSUM)
5948 		priv->hw->rx_csum = priv->plat->rx_coe;
5949 	else
5950 		priv->hw->rx_csum = 0;
5951 	/* No check needed because rx_coe has been set before and it will be
5952 	 * fixed in case of issue.
5953 	 */
5954 	stmmac_rx_ipc(priv, priv->hw);
5955 
5956 	if (priv->sph_cap) {
5957 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5958 		u32 chan;
5959 
5960 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5961 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5962 	}
5963 
5964 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5965 		priv->hw->hw_vlan_en = true;
5966 	else
5967 		priv->hw->hw_vlan_en = false;
5968 
5969 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5970 
5971 	return 0;
5972 }
5973 
5974 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5975 {
5976 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
5977 
5978 	/* This is interrupt context, just spin_lock() */
5979 	spin_lock(&fpe_cfg->lock);
5980 
5981 	if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
5982 		goto unlock_out;
5983 
5984 	/* LP has sent verify mPacket */
5985 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
5986 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
5987 					MPACKET_RESPONSE);
5988 
5989 	/* Local has sent verify mPacket */
5990 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
5991 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
5992 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
5993 
5994 	/* LP has sent response mPacket */
5995 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
5996 	    fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
5997 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
5998 
5999 unlock_out:
6000 	spin_unlock(&fpe_cfg->lock);
6001 }
6002 
6003 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6004 {
6005 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6006 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6007 	u32 queues_count;
6008 	u32 queue;
6009 	bool xmac;
6010 
6011 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6012 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6013 
6014 	if (priv->irq_wake)
6015 		pm_wakeup_event(priv->device, 0);
6016 
6017 	if (priv->dma_cap.estsel)
6018 		stmmac_est_irq_status(priv, priv, priv->dev,
6019 				      &priv->xstats, tx_cnt);
6020 
6021 	if (priv->dma_cap.fpesel) {
6022 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6023 						   priv->dev);
6024 
6025 		stmmac_fpe_event_status(priv, status);
6026 	}
6027 
6028 	/* To handle the GMAC's own interrupts */
6029 	if ((priv->plat->has_gmac) || xmac) {
6030 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6031 
6032 		if (unlikely(status)) {
6033 			/* For LPI we need to save the tx status */
6034 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6035 				priv->tx_path_in_lpi_mode = true;
6036 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6037 				priv->tx_path_in_lpi_mode = false;
6038 		}
6039 
6040 		for (queue = 0; queue < queues_count; queue++)
6041 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6042 
6043 		/* PCS link status */
6044 		if (priv->hw->pcs &&
6045 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6046 			if (priv->xstats.pcs_link)
6047 				netif_carrier_on(priv->dev);
6048 			else
6049 				netif_carrier_off(priv->dev);
6050 		}
6051 
6052 		stmmac_timestamp_interrupt(priv, priv);
6053 	}
6054 }
6055 
6056 /**
6057  *  stmmac_interrupt - main ISR
6058  *  @irq: interrupt number.
6059  *  @dev_id: to pass the net device pointer.
6060  *  Description: this is the main driver interrupt service routine.
6061  *  It can call:
6062  *  o DMA service routine (to manage incoming frame reception and transmission
6063  *    status)
6064  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6065  *    interrupts.
6066  */
6067 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6068 {
6069 	struct net_device *dev = (struct net_device *)dev_id;
6070 	struct stmmac_priv *priv = netdev_priv(dev);
6071 
6072 	/* Check if adapter is up */
6073 	if (test_bit(STMMAC_DOWN, &priv->state))
6074 		return IRQ_HANDLED;
6075 
6076 	/* Check ASP error if it isn't delivered via an individual IRQ */
6077 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6078 		return IRQ_HANDLED;
6079 
6080 	/* To handle Common interrupts */
6081 	stmmac_common_interrupt(priv);
6082 
6083 	/* To handle DMA interrupts */
6084 	stmmac_dma_interrupt(priv);
6085 
6086 	return IRQ_HANDLED;
6087 }
6088 
6089 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6090 {
6091 	struct net_device *dev = (struct net_device *)dev_id;
6092 	struct stmmac_priv *priv = netdev_priv(dev);
6093 
6094 	/* Check if adapter is up */
6095 	if (test_bit(STMMAC_DOWN, &priv->state))
6096 		return IRQ_HANDLED;
6097 
6098 	/* To handle Common interrupts */
6099 	stmmac_common_interrupt(priv);
6100 
6101 	return IRQ_HANDLED;
6102 }
6103 
6104 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6105 {
6106 	struct net_device *dev = (struct net_device *)dev_id;
6107 	struct stmmac_priv *priv = netdev_priv(dev);
6108 
6109 	/* Check if adapter is up */
6110 	if (test_bit(STMMAC_DOWN, &priv->state))
6111 		return IRQ_HANDLED;
6112 
6113 	/* Check if a fatal error happened */
6114 	stmmac_safety_feat_interrupt(priv);
6115 
6116 	return IRQ_HANDLED;
6117 }
6118 
6119 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6120 {
6121 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6122 	struct stmmac_dma_conf *dma_conf;
6123 	int chan = tx_q->queue_index;
6124 	struct stmmac_priv *priv;
6125 	int status;
6126 
6127 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6128 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6129 
6130 	/* Check if adapter is up */
6131 	if (test_bit(STMMAC_DOWN, &priv->state))
6132 		return IRQ_HANDLED;
6133 
6134 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6135 
6136 	if (unlikely(status & tx_hard_error_bump_tc)) {
6137 		/* Try to bump up the dma threshold on this failure */
6138 		stmmac_bump_dma_threshold(priv, chan);
6139 	} else if (unlikely(status == tx_hard_error)) {
6140 		stmmac_tx_err(priv, chan);
6141 	}
6142 
6143 	return IRQ_HANDLED;
6144 }
6145 
6146 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6147 {
6148 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6149 	struct stmmac_dma_conf *dma_conf;
6150 	int chan = rx_q->queue_index;
6151 	struct stmmac_priv *priv;
6152 
6153 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6154 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6155 
6156 	/* Check if adapter is up */
6157 	if (test_bit(STMMAC_DOWN, &priv->state))
6158 		return IRQ_HANDLED;
6159 
6160 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6161 
6162 	return IRQ_HANDLED;
6163 }
6164 
6165 /**
6166  *  stmmac_ioctl - Entry point for the Ioctl
6167  *  @dev: Device pointer.
6168  *  @rq: An IOCTL-specific structure that can contain a pointer to
6169  *  a proprietary structure used to pass information to the driver.
6170  *  @cmd: IOCTL command
6171  *  Description:
6172  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6173  */
6174 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6175 {
6176 	struct stmmac_priv *priv = netdev_priv(dev);
6177 	int ret = -EOPNOTSUPP;
6178 
6179 	if (!netif_running(dev))
6180 		return -EINVAL;
6181 
6182 	switch (cmd) {
6183 	case SIOCGMIIPHY:
6184 	case SIOCGMIIREG:
6185 	case SIOCSMIIREG:
6186 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6187 		break;
6188 	case SIOCSHWTSTAMP:
6189 		ret = stmmac_hwtstamp_set(dev, rq);
6190 		break;
6191 	case SIOCGHWTSTAMP:
6192 		ret = stmmac_hwtstamp_get(dev, rq);
6193 		break;
6194 	default:
6195 		break;
6196 	}
6197 
6198 	return ret;
6199 }
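/* Illustrative user-space use of the SIOCSHWTSTAMP path handled above; the
 * socket fd and "eth0" are placeholders:
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_ALL,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */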
6200 
6201 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6202 				    void *cb_priv)
6203 {
6204 	struct stmmac_priv *priv = cb_priv;
6205 	int ret = -EOPNOTSUPP;
6206 
6207 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6208 		return ret;
6209 
6210 	__stmmac_disable_all_queues(priv);
6211 
6212 	switch (type) {
6213 	case TC_SETUP_CLSU32:
6214 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6215 		break;
6216 	case TC_SETUP_CLSFLOWER:
6217 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6218 		break;
6219 	default:
6220 		break;
6221 	}
6222 
6223 	stmmac_enable_all_queues(priv);
6224 	return ret;
6225 }
6226 
6227 static LIST_HEAD(stmmac_block_cb_list);
6228 
6229 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6230 			   void *type_data)
6231 {
6232 	struct stmmac_priv *priv = netdev_priv(ndev);
6233 
6234 	switch (type) {
6235 	case TC_QUERY_CAPS:
6236 		return stmmac_tc_query_caps(priv, priv, type_data);
6237 	case TC_SETUP_QDISC_MQPRIO:
6238 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6239 	case TC_SETUP_BLOCK:
6240 		return flow_block_cb_setup_simple(type_data,
6241 						  &stmmac_block_cb_list,
6242 						  stmmac_setup_tc_block_cb,
6243 						  priv, priv, true);
6244 	case TC_SETUP_QDISC_CBS:
6245 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6246 	case TC_SETUP_QDISC_TAPRIO:
6247 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6248 	case TC_SETUP_QDISC_ETF:
6249 		return stmmac_tc_setup_etf(priv, priv, type_data);
6250 	default:
6251 		return -EOPNOTSUPP;
6252 	}
6253 }
6254 
6255 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6256 			       struct net_device *sb_dev)
6257 {
6258 	int gso = skb_shinfo(skb)->gso_type;
6259 
6260 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6261 		/*
6262 		 * There is no way to determine the number of TSO/USO
6263 		 * capable Queues. Let's always use Queue 0
6264 		 * because if TSO/USO is supported then at least this
6265 		 * one will be capable.
6266 		 */
6267 		return 0;
6268 	}
6269 
6270 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6271 }
6272 
6273 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6274 {
6275 	struct stmmac_priv *priv = netdev_priv(ndev);
6276 	int ret = 0;
6277 
6278 	ret = pm_runtime_resume_and_get(priv->device);
6279 	if (ret < 0)
6280 		return ret;
6281 
6282 	ret = eth_mac_addr(ndev, addr);
6283 	if (ret)
6284 		goto set_mac_error;
6285 
6286 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6287 
6288 set_mac_error:
6289 	pm_runtime_put(priv->device);
6290 
6291 	return ret;
6292 }
6293 
6294 #ifdef CONFIG_DEBUG_FS
6295 static struct dentry *stmmac_fs_dir;
6296 
6297 static void sysfs_display_ring(void *head, int size, int extend_desc,
6298 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6299 {
6300 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6301 	struct dma_desc *p = (struct dma_desc *)head;
6302 	unsigned int desc_size;
6303 	dma_addr_t dma_addr;
6304 	int i;
6305 
6306 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6307 	for (i = 0; i < size; i++) {
6308 		dma_addr = dma_phy_addr + i * desc_size;
6309 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6310 				i, &dma_addr,
6311 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6312 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6313 		if (extend_desc)
6314 			p = &(++ep)->basic;
6315 		else
6316 			p++;
6317 	}
6318 }
6319 
6320 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6321 {
6322 	struct net_device *dev = seq->private;
6323 	struct stmmac_priv *priv = netdev_priv(dev);
6324 	u32 rx_count = priv->plat->rx_queues_to_use;
6325 	u32 tx_count = priv->plat->tx_queues_to_use;
6326 	u32 queue;
6327 
6328 	if ((dev->flags & IFF_UP) == 0)
6329 		return 0;
6330 
6331 	for (queue = 0; queue < rx_count; queue++) {
6332 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6333 
6334 		seq_printf(seq, "RX Queue %d:\n", queue);
6335 
6336 		if (priv->extend_desc) {
6337 			seq_printf(seq, "Extended descriptor ring:\n");
6338 			sysfs_display_ring((void *)rx_q->dma_erx,
6339 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6340 		} else {
6341 			seq_printf(seq, "Descriptor ring:\n");
6342 			sysfs_display_ring((void *)rx_q->dma_rx,
6343 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6344 		}
6345 	}
6346 
6347 	for (queue = 0; queue < tx_count; queue++) {
6348 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6349 
6350 		seq_printf(seq, "TX Queue %d:\n", queue);
6351 
6352 		if (priv->extend_desc) {
6353 			seq_printf(seq, "Extended descriptor ring:\n");
6354 			sysfs_display_ring((void *)tx_q->dma_etx,
6355 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6356 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6357 			seq_printf(seq, "Descriptor ring:\n");
6358 			sysfs_display_ring((void *)tx_q->dma_tx,
6359 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6360 		}
6361 	}
6362 
6363 	return 0;
6364 }
6365 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6366 
6367 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6368 {
6369 	static const char * const dwxgmac_timestamp_source[] = {
6370 		"None",
6371 		"Internal",
6372 		"External",
6373 		"Both",
6374 	};
6375 	static const char * const dwxgmac_safety_feature_desc[] = {
6376 		"No",
6377 		"All Safety Features with ECC and Parity",
6378 		"All Safety Features without ECC or Parity",
6379 		"All Safety Features with Parity Only",
6380 		"ECC Only",
6381 		"UNDEFINED",
6382 		"UNDEFINED",
6383 		"UNDEFINED",
6384 	};
6385 	struct net_device *dev = seq->private;
6386 	struct stmmac_priv *priv = netdev_priv(dev);
6387 
6388 	if (!priv->hw_cap_support) {
6389 		seq_printf(seq, "DMA HW features not supported\n");
6390 		return 0;
6391 	}
6392 
6393 	seq_printf(seq, "==============================\n");
6394 	seq_printf(seq, "\tDMA HW features\n");
6395 	seq_printf(seq, "==============================\n");
6396 
6397 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6398 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6399 	seq_printf(seq, "\t1000 Mbps: %s\n",
6400 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6401 	seq_printf(seq, "\tHalf duplex: %s\n",
6402 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6403 	if (priv->plat->has_xgmac) {
6404 		seq_printf(seq,
6405 			   "\tNumber of Additional MAC address registers: %d\n",
6406 			   priv->dma_cap.multi_addr);
6407 	} else {
6408 		seq_printf(seq, "\tHash Filter: %s\n",
6409 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6410 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6411 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6412 	}
6413 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6414 		   (priv->dma_cap.pcs) ? "Y" : "N");
6415 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6416 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6417 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6418 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6419 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6420 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6421 	seq_printf(seq, "\tRMON module: %s\n",
6422 		   (priv->dma_cap.rmon) ? "Y" : "N");
6423 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6424 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6425 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6426 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6427 	if (priv->plat->has_xgmac)
6428 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6429 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6430 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6431 		   (priv->dma_cap.eee) ? "Y" : "N");
6432 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6433 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6434 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6435 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6436 	    priv->plat->has_xgmac) {
6437 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6438 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6439 	} else {
6440 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6441 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6442 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6443 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6444 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6445 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6446 	}
6447 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6448 		   priv->dma_cap.number_rx_channel);
6449 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6450 		   priv->dma_cap.number_tx_channel);
6451 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6452 		   priv->dma_cap.number_rx_queues);
6453 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6454 		   priv->dma_cap.number_tx_queues);
6455 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6456 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6457 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6458 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6459 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6460 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6461 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6462 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6463 		   priv->dma_cap.pps_out_num);
6464 	seq_printf(seq, "\tSafety Features: %s\n",
6465 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6466 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6467 		   priv->dma_cap.frpsel ? "Y" : "N");
6468 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6469 		   priv->dma_cap.host_dma_width);
6470 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6471 		   priv->dma_cap.rssen ? "Y" : "N");
6472 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6473 		   priv->dma_cap.vlhash ? "Y" : "N");
6474 	seq_printf(seq, "\tSplit Header: %s\n",
6475 		   priv->dma_cap.sphen ? "Y" : "N");
6476 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6477 		   priv->dma_cap.vlins ? "Y" : "N");
6478 	seq_printf(seq, "\tDouble VLAN: %s\n",
6479 		   priv->dma_cap.dvlan ? "Y" : "N");
6480 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6481 		   priv->dma_cap.l3l4fnum);
6482 	seq_printf(seq, "\tARP Offloading: %s\n",
6483 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6484 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6485 		   priv->dma_cap.estsel ? "Y" : "N");
6486 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6487 		   priv->dma_cap.fpesel ? "Y" : "N");
6488 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6489 		   priv->dma_cap.tbssel ? "Y" : "N");
6490 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6491 		   priv->dma_cap.tbs_ch_num);
6492 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6493 		   priv->dma_cap.sgfsel ? "Y" : "N");
6494 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6495 		   BIT(priv->dma_cap.ttsfd) >> 1);
6496 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6497 		   priv->dma_cap.numtc);
6498 	seq_printf(seq, "\tDCB Feature: %s\n",
6499 		   priv->dma_cap.dcben ? "Y" : "N");
6500 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6501 		   priv->dma_cap.advthword ? "Y" : "N");
6502 	seq_printf(seq, "\tPTP Offload: %s\n",
6503 		   priv->dma_cap.ptoen ? "Y" : "N");
6504 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6505 		   priv->dma_cap.osten ? "Y" : "N");
6506 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6507 		   priv->dma_cap.pfcen ? "Y" : "N");
6508 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6509 		   BIT(priv->dma_cap.frpes) << 6);
6510 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6511 		   BIT(priv->dma_cap.frpbs) << 6);
6512 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6513 		   priv->dma_cap.frppipe_num);
6514 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6515 		   priv->dma_cap.nrvf_num ?
6516 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6517 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6518 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6519 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6520 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6521 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6522 		   priv->dma_cap.cbtisel ? "Y" : "N");
6523 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6524 		   priv->dma_cap.aux_snapshot_n);
6525 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6526 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6527 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6528 		   priv->dma_cap.edma ? "Y" : "N");
6529 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6530 		   priv->dma_cap.ediffc ? "Y" : "N");
6531 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6532 		   priv->dma_cap.vxn ? "Y" : "N");
6533 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6534 		   priv->dma_cap.dbgmem ? "Y" : "N");
6535 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6536 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6537 	return 0;
6538 }
6539 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6540 
6541 /* Use network device events to rename debugfs file entries.
6542  */
6543 static int stmmac_device_event(struct notifier_block *unused,
6544 			       unsigned long event, void *ptr)
6545 {
6546 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6547 	struct stmmac_priv *priv = netdev_priv(dev);
6548 
6549 	if (dev->netdev_ops != &stmmac_netdev_ops)
6550 		goto done;
6551 
6552 	switch (event) {
6553 	case NETDEV_CHANGENAME:
6554 		if (priv->dbgfs_dir)
6555 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6556 							 priv->dbgfs_dir,
6557 							 stmmac_fs_dir,
6558 							 dev->name);
6559 		break;
6560 	}
6561 done:
6562 	return NOTIFY_DONE;
6563 }
6564 
6565 static struct notifier_block stmmac_notifier = {
6566 	.notifier_call = stmmac_device_event,
6567 };
6568 
6569 static void stmmac_init_fs(struct net_device *dev)
6570 {
6571 	struct stmmac_priv *priv = netdev_priv(dev);
6572 
6573 	rtnl_lock();
6574 
6575 	/* Create per netdev entries */
6576 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6577 
6578 	/* Entry to report DMA RX/TX rings */
6579 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6580 			    &stmmac_rings_status_fops);
6581 
6582 	/* Entry to report the DMA HW features */
6583 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6584 			    &stmmac_dma_cap_fops);
6585 
6586 	rtnl_unlock();
6587 }
6588 
6589 static void stmmac_exit_fs(struct net_device *dev)
6590 {
6591 	struct stmmac_priv *priv = netdev_priv(dev);
6592 
6593 	debugfs_remove_recursive(priv->dbgfs_dir);
6594 }
6595 #endif /* CONFIG_DEBUG_FS */
6596 
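/* Compute a little-endian CRC-32 (Ethernet polynomial 0xEDB88320) over the
 * 12 valid bits of the VLAN ID. The result is used by stmmac_vlan_update()
 * to derive the 4-bit index into the hardware VLAN hash filter.
 */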
6597 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6598 {
6599 	unsigned char *data = (unsigned char *)&vid_le;
6600 	unsigned char data_byte = 0;
6601 	u32 crc = ~0x0;
6602 	u32 temp = 0;
6603 	int i, bits;
6604 
6605 	bits = get_bitmask_order(VLAN_VID_MASK);
6606 	for (i = 0; i < bits; i++) {
6607 		if ((i % 8) == 0)
6608 			data_byte = data[i / 8];
6609 
6610 		temp = ((crc & 1) ^ data_byte) & 1;
6611 		crc >>= 1;
6612 		data_byte >>= 1;
6613 
6614 		if (temp)
6615 			crc ^= 0xedb88320;
6616 	}
6617 
6618 	return crc;
6619 }
6620 
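/* Rebuild the VLAN filter from the set of currently active VIDs. Each VID
 * selects one of 16 hash bins via the top 4 bits of the bit-reversed,
 * inverted CRC. If the hardware has no VLAN hash filter (dma_cap.vlhash
 * is 0), fall back to a single perfect-match entry instead.
 */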
6621 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6622 {
6623 	u32 crc, hash = 0;
6624 	u16 pmatch = 0;
6625 	int count = 0;
6626 	u16 vid = 0;
6627 
6628 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6629 		__le16 vid_le = cpu_to_le16(vid);
6630 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6631 		hash |= (1 << crc);
6632 		count++;
6633 	}
6634 
6635 	if (!priv->dma_cap.vlhash) {
6636 		if (count > 2) /* VID = 0 always passes filter */
6637 			return -EOPNOTSUPP;
6638 
6639 		pmatch = vid;
6640 		hash = 0;
6641 	}
6642 
6643 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6644 }
6645 
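/* .ndo_vlan_rx_add_vid: mark the VID as active, refresh the hash/perfect
 * match filter and, when the MAC exposes dedicated VLAN filter registers
 * (hw->num_vlan), also program a per-VID hardware entry. Runs with a
 * runtime-PM reference held so the MAC registers stay accessible.
 */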
6646 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6647 {
6648 	struct stmmac_priv *priv = netdev_priv(ndev);
6649 	bool is_double = false;
6650 	int ret;
6651 
6652 	ret = pm_runtime_resume_and_get(priv->device);
6653 	if (ret < 0)
6654 		return ret;
6655 
6656 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6657 		is_double = true;
6658 
6659 	set_bit(vid, priv->active_vlans);
6660 	ret = stmmac_vlan_update(priv, is_double);
6661 	if (ret) {
6662 		clear_bit(vid, priv->active_vlans);
6663 		goto err_pm_put;
6664 	}
6665 
6666 	if (priv->hw->num_vlan) {
6667 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6668 		if (ret)
6669 			goto err_pm_put;
6670 	}
6671 err_pm_put:
6672 	pm_runtime_put(priv->device);
6673 
6674 	return ret;
6675 }
6676 
6677 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6678 {
6679 	struct stmmac_priv *priv = netdev_priv(ndev);
6680 	bool is_double = false;
6681 	int ret;
6682 
6683 	ret = pm_runtime_resume_and_get(priv->device);
6684 	if (ret < 0)
6685 		return ret;
6686 
6687 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6688 		is_double = true;
6689 
6690 	clear_bit(vid, priv->active_vlans);
6691 
6692 	if (priv->hw->num_vlan) {
6693 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6694 		if (ret)
6695 			goto del_vlan_error;
6696 	}
6697 
6698 	ret = stmmac_vlan_update(priv, is_double);
6699 
6700 del_vlan_error:
6701 	pm_runtime_put(priv->device);
6702 
6703 	return ret;
6704 }
6705 
6706 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6707 {
6708 	struct stmmac_priv *priv = netdev_priv(dev);
6709 
6710 	switch (bpf->command) {
6711 	case XDP_SETUP_PROG:
6712 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6713 	case XDP_SETUP_XSK_POOL:
6714 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6715 					     bpf->xsk.queue_id);
6716 	default:
6717 		return -EOPNOTSUPP;
6718 	}
6719 }
6720 
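/* .ndo_xdp_xmit: transmit XDP frames redirected from other interfaces.
 * The TX queue is derived from the current CPU and is shared with the
 * regular stack, hence the __netif_tx_lock() and the trans_start update
 * to avoid spurious TX timeouts. Returns the number of frames queued;
 * descriptors are pushed to the hardware only on XDP_XMIT_FLUSH.
 */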
6721 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6722 			   struct xdp_frame **frames, u32 flags)
6723 {
6724 	struct stmmac_priv *priv = netdev_priv(dev);
6725 	int cpu = smp_processor_id();
6726 	struct netdev_queue *nq;
6727 	int i, nxmit = 0;
6728 	int queue;
6729 
6730 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6731 		return -ENETDOWN;
6732 
6733 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6734 		return -EINVAL;
6735 
6736 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6737 	nq = netdev_get_tx_queue(priv->dev, queue);
6738 
6739 	__netif_tx_lock(nq, cpu);
6740 	/* Avoids TX time-out as we are sharing with slow path */
6741 	txq_trans_cond_update(nq);
6742 
6743 	for (i = 0; i < num_frames; i++) {
6744 		int res;
6745 
6746 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6747 		if (res == STMMAC_XDP_CONSUMED)
6748 			break;
6749 
6750 		nxmit++;
6751 	}
6752 
6753 	if (flags & XDP_XMIT_FLUSH) {
6754 		stmmac_flush_tx_descriptors(priv, queue);
6755 		stmmac_tx_timer_arm(priv, queue);
6756 	}
6757 
6758 	__netif_tx_unlock(nq);
6759 
6760 	return nxmit;
6761 }
6762 
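/* The helpers below tear down and rebuild a single RX or TX queue (DMA
 * channel, descriptor ring and IRQ) without disturbing the other queues,
 * e.g. while an XSK buffer pool is being attached or detached at runtime.
 */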
6763 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6764 {
6765 	struct stmmac_channel *ch = &priv->channel[queue];
6766 	unsigned long flags;
6767 
6768 	spin_lock_irqsave(&ch->lock, flags);
6769 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6770 	spin_unlock_irqrestore(&ch->lock, flags);
6771 
6772 	stmmac_stop_rx_dma(priv, queue);
6773 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6774 }
6775 
6776 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6777 {
6778 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6779 	struct stmmac_channel *ch = &priv->channel[queue];
6780 	unsigned long flags;
6781 	u32 buf_size;
6782 	int ret;
6783 
6784 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6785 	if (ret) {
6786 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6787 		return;
6788 	}
6789 
6790 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6791 	if (ret) {
6792 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6793 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6794 		return;
6795 	}
6796 
6797 	stmmac_reset_rx_queue(priv, queue);
6798 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6799 
6800 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6801 			    rx_q->dma_rx_phy, rx_q->queue_index);
6802 
6803 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6804 			     sizeof(struct dma_desc));
6805 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6806 			       rx_q->rx_tail_addr, rx_q->queue_index);
6807 
6808 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6809 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6810 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6811 				      buf_size,
6812 				      rx_q->queue_index);
6813 	} else {
6814 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6815 				      priv->dma_conf.dma_buf_sz,
6816 				      rx_q->queue_index);
6817 	}
6818 
6819 	stmmac_start_rx_dma(priv, queue);
6820 
6821 	spin_lock_irqsave(&ch->lock, flags);
6822 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6823 	spin_unlock_irqrestore(&ch->lock, flags);
6824 }
6825 
6826 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6827 {
6828 	struct stmmac_channel *ch = &priv->channel[queue];
6829 	unsigned long flags;
6830 
6831 	spin_lock_irqsave(&ch->lock, flags);
6832 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6833 	spin_unlock_irqrestore(&ch->lock, flags);
6834 
6835 	stmmac_stop_tx_dma(priv, queue);
6836 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6837 }
6838 
6839 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6840 {
6841 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6842 	struct stmmac_channel *ch = &priv->channel[queue];
6843 	unsigned long flags;
6844 	int ret;
6845 
6846 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6847 	if (ret) {
6848 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6849 		return;
6850 	}
6851 
6852 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6853 	if (ret) {
6854 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6855 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6856 		return;
6857 	}
6858 
6859 	stmmac_reset_tx_queue(priv, queue);
6860 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6861 
6862 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6863 			    tx_q->dma_tx_phy, tx_q->queue_index);
6864 
6865 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6866 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6867 
6868 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6869 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6870 			       tx_q->tx_tail_addr, tx_q->queue_index);
6871 
6872 	stmmac_start_tx_dma(priv, queue);
6873 
6874 	spin_lock_irqsave(&ch->lock, flags);
6875 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6876 	spin_unlock_irqrestore(&ch->lock, flags);
6877 }
6878 
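/* Quiesce the whole datapath (NAPI, TX timers, IRQs, DMA and MAC) and free
 * the descriptor resources; counterpart of stmmac_xdp_open(). Typically
 * used while a running interface is being reconfigured for XDP.
 */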
6879 void stmmac_xdp_release(struct net_device *dev)
6880 {
6881 	struct stmmac_priv *priv = netdev_priv(dev);
6882 	u32 chan;
6883 
6884 	/* Ensure tx function is not running */
6885 	netif_tx_disable(dev);
6886 
6887 	/* Disable NAPI process */
6888 	stmmac_disable_all_queues(priv);
6889 
6890 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6891 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6892 
6893 	/* Free the IRQ lines */
6894 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6895 
6896 	/* Stop TX/RX DMA channels */
6897 	stmmac_stop_all_dma(priv);
6898 
6899 	/* Release and free the Rx/Tx resources */
6900 	free_dma_desc_resources(priv, &priv->dma_conf);
6901 
6902 	/* Disable the MAC Rx/Tx */
6903 	stmmac_mac_set(priv, priv->ioaddr, false);
6904 
6905 	/* set trans_start so we don't get spurious
6906 	 * watchdogs during reset
6907 	 */
6908 	netif_trans_update(dev);
6909 	netif_carrier_off(dev);
6910 }
6911 
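/* Bring the datapath back up after stmmac_xdp_release(): allocate and
 * initialize the descriptor rings, program every RX/TX DMA channel
 * (including the per-queue buffer size, which differs when an XSK pool
 * is attached), request the IRQs and re-enable NAPI and the MAC.
 */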
6912 int stmmac_xdp_open(struct net_device *dev)
6913 {
6914 	struct stmmac_priv *priv = netdev_priv(dev);
6915 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6916 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6917 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6918 	struct stmmac_rx_queue *rx_q;
6919 	struct stmmac_tx_queue *tx_q;
6920 	u32 buf_size;
6921 	bool sph_en;
6922 	u32 chan;
6923 	int ret;
6924 
6925 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6926 	if (ret < 0) {
6927 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6928 			   __func__);
6929 		goto dma_desc_error;
6930 	}
6931 
6932 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6933 	if (ret < 0) {
6934 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6935 			   __func__);
6936 		goto init_error;
6937 	}
6938 
6939 	stmmac_reset_queues_param(priv);
6940 
6941 	/* DMA CSR Channel configuration */
6942 	for (chan = 0; chan < dma_csr_ch; chan++) {
6943 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6944 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6945 	}
6946 
6947 	/* Adjust Split header */
6948 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6949 
6950 	/* DMA RX Channel Configuration */
6951 	for (chan = 0; chan < rx_cnt; chan++) {
6952 		rx_q = &priv->dma_conf.rx_queue[chan];
6953 
6954 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6955 				    rx_q->dma_rx_phy, chan);
6956 
6957 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6958 				     (rx_q->buf_alloc_num *
6959 				      sizeof(struct dma_desc));
6960 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6961 				       rx_q->rx_tail_addr, chan);
6962 
6963 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6964 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6965 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6966 					      buf_size,
6967 					      rx_q->queue_index);
6968 		} else {
6969 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6970 					      priv->dma_conf.dma_buf_sz,
6971 					      rx_q->queue_index);
6972 		}
6973 
6974 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6975 	}
6976 
6977 	/* DMA TX Channel Configuration */
6978 	for (chan = 0; chan < tx_cnt; chan++) {
6979 		tx_q = &priv->dma_conf.tx_queue[chan];
6980 
6981 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6982 				    tx_q->dma_tx_phy, chan);
6983 
6984 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6985 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6986 				       tx_q->tx_tail_addr, chan);
6987 
6988 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6989 		tx_q->txtimer.function = stmmac_tx_timer;
6990 	}
6991 
6992 	/* Enable the MAC Rx/Tx */
6993 	stmmac_mac_set(priv, priv->ioaddr, true);
6994 
6995 	/* Start Rx & Tx DMA Channels */
6996 	stmmac_start_all_dma(priv);
6997 
6998 	ret = stmmac_request_irq(dev);
6999 	if (ret)
7000 		goto irq_error;
7001 
7002 	/* Enable NAPI process */
7003 	stmmac_enable_all_queues(priv);
7004 	netif_carrier_on(dev);
7005 	netif_tx_start_all_queues(dev);
7006 	stmmac_enable_all_dma_irq(priv);
7007 
7008 	return 0;
7009 
7010 irq_error:
7011 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7012 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7013 
7014 	stmmac_hw_teardown(dev);
7015 init_error:
7016 	free_dma_desc_resources(priv, &priv->dma_conf);
7017 dma_desc_error:
7018 	return ret;
7019 }
7020 
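/* .ndo_xsk_wakeup: kick the zero-copy RX/TX NAPI for a queue that has an
 * XSK pool bound. The EQoS core has no per-channel software interrupt, so
 * the rxtx NAPI is scheduled directly unless it is already running.
 */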
7021 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7022 {
7023 	struct stmmac_priv *priv = netdev_priv(dev);
7024 	struct stmmac_rx_queue *rx_q;
7025 	struct stmmac_tx_queue *tx_q;
7026 	struct stmmac_channel *ch;
7027 
7028 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7029 	    !netif_carrier_ok(priv->dev))
7030 		return -ENETDOWN;
7031 
7032 	if (!stmmac_xdp_is_enabled(priv))
7033 		return -EINVAL;
7034 
7035 	if (queue >= priv->plat->rx_queues_to_use ||
7036 	    queue >= priv->plat->tx_queues_to_use)
7037 		return -EINVAL;
7038 
7039 	rx_q = &priv->dma_conf.rx_queue[queue];
7040 	tx_q = &priv->dma_conf.tx_queue[queue];
7041 	ch = &priv->channel[queue];
7042 
7043 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7044 		return -EINVAL;
7045 
7046 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7047 		/* EQoS does not have a per-DMA-channel SW interrupt,
7048 		 * so we schedule the RX NAPI straight away.
7049 		 */
7050 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7051 			__napi_schedule(&ch->rxtx_napi);
7052 	}
7053 
7054 	return 0;
7055 }
7056 
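/* .ndo_get_stats64: aggregate the per-queue packet/byte counters. Each
 * counter group is read under its u64_stats sequence-retry loop so that
 * 64-bit values remain consistent on 32-bit systems.
 */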
7057 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7058 {
7059 	struct stmmac_priv *priv = netdev_priv(dev);
7060 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7061 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7062 	unsigned int start;
7063 	int q;
7064 
7065 	for (q = 0; q < tx_cnt; q++) {
7066 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7067 		u64 tx_packets;
7068 		u64 tx_bytes;
7069 
7070 		do {
7071 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7072 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7073 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7074 		do {
7075 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7076 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7077 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7078 
7079 		stats->tx_packets += tx_packets;
7080 		stats->tx_bytes += tx_bytes;
7081 	}
7082 
7083 	for (q = 0; q < rx_cnt; q++) {
7084 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7085 		u64 rx_packets;
7086 		u64 rx_bytes;
7087 
7088 		do {
7089 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7090 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7091 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7092 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7093 
7094 		stats->rx_packets += rx_packets;
7095 		stats->rx_bytes += rx_bytes;
7096 	}
7097 
7098 	stats->rx_dropped = priv->xstats.rx_dropped;
7099 	stats->rx_errors = priv->xstats.rx_errors;
7100 	stats->tx_dropped = priv->xstats.tx_dropped;
7101 	stats->tx_errors = priv->xstats.tx_errors;
7102 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7103 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7104 	stats->rx_length_errors = priv->xstats.rx_length;
7105 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7106 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7107 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7108 }
7109 
7110 static const struct net_device_ops stmmac_netdev_ops = {
7111 	.ndo_open = stmmac_open,
7112 	.ndo_start_xmit = stmmac_xmit,
7113 	.ndo_stop = stmmac_release,
7114 	.ndo_change_mtu = stmmac_change_mtu,
7115 	.ndo_fix_features = stmmac_fix_features,
7116 	.ndo_set_features = stmmac_set_features,
7117 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7118 	.ndo_tx_timeout = stmmac_tx_timeout,
7119 	.ndo_eth_ioctl = stmmac_ioctl,
7120 	.ndo_get_stats64 = stmmac_get_stats64,
7121 	.ndo_setup_tc = stmmac_setup_tc,
7122 	.ndo_select_queue = stmmac_select_queue,
7123 	.ndo_set_mac_address = stmmac_set_mac_address,
7124 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7125 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7126 	.ndo_bpf = stmmac_bpf,
7127 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7128 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7129 };
7130 
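/* Handle a requested adapter reset from the service task: under the RTNL
 * lock, mark the device as down, then close and re-open it to fully
 * re-initialize the DMA engines and the MAC.
 */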
7131 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7132 {
7133 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7134 		return;
7135 	if (test_bit(STMMAC_DOWN, &priv->state))
7136 		return;
7137 
7138 	netdev_err(priv->dev, "Reset adapter.\n");
7139 
7140 	rtnl_lock();
7141 	netif_trans_update(priv->dev);
7142 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7143 		usleep_range(1000, 2000);
7144 
7145 	set_bit(STMMAC_DOWN, &priv->state);
7146 	dev_close(priv->dev);
7147 	dev_open(priv->dev, NULL);
7148 	clear_bit(STMMAC_DOWN, &priv->state);
7149 	clear_bit(STMMAC_RESETING, &priv->state);
7150 	rtnl_unlock();
7151 }
7152 
7153 static void stmmac_service_task(struct work_struct *work)
7154 {
7155 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7156 			service_task);
7157 
7158 	stmmac_reset_subtask(priv);
7159 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7160 }
7161 
7162 /**
7163  *  stmmac_hw_init - Init the MAC device
7164  *  @priv: driver private structure
7165  *  Description: this function configures the MAC device according to
7166  *  platform parameters and the HW capability register. It prepares the
7167  *  driver to use either ring or chain mode and either enhanced or
7168  *  normal descriptors.
7169  */
7170 static int stmmac_hw_init(struct stmmac_priv *priv)
7171 {
7172 	int ret;
7173 
7174 	/* dwmac-sun8i only work in chain mode */
7175 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7176 		chain_mode = 1;
7177 	priv->chain_mode = chain_mode;
7178 
7179 	/* Initialize HW Interface */
7180 	ret = stmmac_hwif_init(priv);
7181 	if (ret)
7182 		return ret;
7183 
7184 	/* Get the HW capability (available on cores newer than 3.50a) */
7185 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7186 	if (priv->hw_cap_support) {
7187 		dev_info(priv->device, "DMA HW capability register supported\n");
7188 
7189 		/* Some gmac/dma configuration fields passed in through
7190 		 * the platform data (e.g. enh_desc, tx_coe) can be
7191 		 * overridden with the values from the HW capability
7192 		 * register, if supported.
7193 		 */
7194 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7195 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7196 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7197 		priv->hw->pmt = priv->plat->pmt;
7198 		if (priv->dma_cap.hash_tb_sz) {
7199 			priv->hw->multicast_filter_bins =
7200 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7201 			priv->hw->mcast_bits_log2 =
7202 					ilog2(priv->hw->multicast_filter_bins);
7203 		}
7204 
7205 		/* TXCOE doesn't work in thresh DMA mode */
7206 		if (priv->plat->force_thresh_dma_mode)
7207 			priv->plat->tx_coe = 0;
7208 		else
7209 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7210 
7211 		/* In case of GMAC4 rx_coe is from HW cap register. */
7212 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7213 
7214 		if (priv->dma_cap.rx_coe_type2)
7215 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7216 		else if (priv->dma_cap.rx_coe_type1)
7217 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7218 
7219 	} else {
7220 		dev_info(priv->device, "No HW DMA feature register supported\n");
7221 	}
7222 
7223 	if (priv->plat->rx_coe) {
7224 		priv->hw->rx_csum = priv->plat->rx_coe;
7225 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7226 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7227 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7228 	}
7229 	if (priv->plat->tx_coe)
7230 		dev_info(priv->device, "TX Checksum insertion supported\n");
7231 
7232 	if (priv->plat->pmt) {
7233 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7234 		device_set_wakeup_capable(priv->device, 1);
7235 	}
7236 
7237 	if (priv->dma_cap.tsoen)
7238 		dev_info(priv->device, "TSO supported\n");
7239 
7240 	if (priv->dma_cap.number_rx_queues &&
7241 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7242 		dev_warn(priv->device,
7243 			 "Number of Rx queues (%u) exceeds dma capability\n",
7244 			 priv->plat->rx_queues_to_use);
7245 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7246 	}
7247 	if (priv->dma_cap.number_tx_queues &&
7248 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7249 		dev_warn(priv->device,
7250 			 "Number of Tx queues (%u) exceeds dma capability\n",
7251 			 priv->plat->tx_queues_to_use);
7252 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7253 	}
7254 
7255 	if (priv->dma_cap.rx_fifo_size &&
7256 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7257 		dev_warn(priv->device,
7258 			 "Rx FIFO size (%u) exceeds dma capability\n",
7259 			 priv->plat->rx_fifo_size);
7260 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7261 	}
7262 	if (priv->dma_cap.tx_fifo_size &&
7263 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7264 		dev_warn(priv->device,
7265 			 "Tx FIFO size (%u) exceeds dma capability\n",
7266 			 priv->plat->tx_fifo_size);
7267 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7268 	}
7269 
7270 	priv->hw->vlan_fail_q_en =
7271 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7272 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7273 
7274 	/* Run HW quirks, if any */
7275 	if (priv->hwif_quirks) {
7276 		ret = priv->hwif_quirks(priv);
7277 		if (ret)
7278 			return ret;
7279 	}
7280 
7281 	/* Rx Watchdog is available in cores newer than 3.40.
7282 	 * In some cases, for example on buggy hardware, this feature
7283 	 * has to be disabled; this can be done by passing the
7284 	 * riwt_off field from the platform.
7285 	 */
7286 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7287 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7288 		priv->use_riwt = 1;
7289 		dev_info(priv->device,
7290 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7291 	}
7292 
7293 	return 0;
7294 }
7295 
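/* Register the NAPI contexts for every channel: one RX NAPI per RX queue,
 * one TX NAPI per TX queue, plus a combined rxtx NAPI for channels that
 * have both (used by the XSK zero-copy path).
 */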
7296 static void stmmac_napi_add(struct net_device *dev)
7297 {
7298 	struct stmmac_priv *priv = netdev_priv(dev);
7299 	u32 queue, maxq;
7300 
7301 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7302 
7303 	for (queue = 0; queue < maxq; queue++) {
7304 		struct stmmac_channel *ch = &priv->channel[queue];
7305 
7306 		ch->priv_data = priv;
7307 		ch->index = queue;
7308 		spin_lock_init(&ch->lock);
7309 
7310 		if (queue < priv->plat->rx_queues_to_use) {
7311 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7312 		}
7313 		if (queue < priv->plat->tx_queues_to_use) {
7314 			netif_napi_add_tx(dev, &ch->tx_napi,
7315 					  stmmac_napi_poll_tx);
7316 		}
7317 		if (queue < priv->plat->rx_queues_to_use &&
7318 		    queue < priv->plat->tx_queues_to_use) {
7319 			netif_napi_add(dev, &ch->rxtx_napi,
7320 				       stmmac_napi_poll_rxtx);
7321 		}
7322 	}
7323 }
7324 
7325 static void stmmac_napi_del(struct net_device *dev)
7326 {
7327 	struct stmmac_priv *priv = netdev_priv(dev);
7328 	u32 queue, maxq;
7329 
7330 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7331 
7332 	for (queue = 0; queue < maxq; queue++) {
7333 		struct stmmac_channel *ch = &priv->channel[queue];
7334 
7335 		if (queue < priv->plat->rx_queues_to_use)
7336 			netif_napi_del(&ch->rx_napi);
7337 		if (queue < priv->plat->tx_queues_to_use)
7338 			netif_napi_del(&ch->tx_napi);
7339 		if (queue < priv->plat->rx_queues_to_use &&
7340 		    queue < priv->plat->tx_queues_to_use) {
7341 			netif_napi_del(&ch->rxtx_napi);
7342 		}
7343 	}
7344 }
7345 
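/* Change the number of RX/TX queues at runtime (e.g. via ethtool's
 * set-channels operation): close the interface if it is running, drop and
 * re-add the NAPI contexts, rebuild the default RSS table unless the user
 * has configured one, then re-open the interface.
 */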
7346 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7347 {
7348 	struct stmmac_priv *priv = netdev_priv(dev);
7349 	int ret = 0, i;
7350 
7351 	if (netif_running(dev))
7352 		stmmac_release(dev);
7353 
7354 	stmmac_napi_del(dev);
7355 
7356 	priv->plat->rx_queues_to_use = rx_cnt;
7357 	priv->plat->tx_queues_to_use = tx_cnt;
7358 	if (!netif_is_rxfh_configured(dev))
7359 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7360 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7361 									rx_cnt);
7362 
7363 	stmmac_napi_add(dev);
7364 
7365 	if (netif_running(dev))
7366 		ret = stmmac_open(dev);
7367 
7368 	return ret;
7369 }
7370 
7371 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7372 {
7373 	struct stmmac_priv *priv = netdev_priv(dev);
7374 	int ret = 0;
7375 
7376 	if (netif_running(dev))
7377 		stmmac_release(dev);
7378 
7379 	priv->dma_conf.dma_rx_size = rx_size;
7380 	priv->dma_conf.dma_tx_size = tx_size;
7381 
7382 	if (netif_running(dev))
7383 		ret = stmmac_open(dev);
7384 
7385 	return ret;
7386 }
7387 
7388 /**
7389  * stmmac_fpe_verify_timer - Timer for MAC Merge verification
7390  * @t:  timer_list struct containing private info
7391  *
7392  * Verify the MAC Merge capability in the local TX direction, by
7393  * transmitting Verify mPackets up to 3 times. Wait until link
7394  * partner responds with a Response mPacket, otherwise fail.
7395  */
7396 static void stmmac_fpe_verify_timer(struct timer_list *t)
7397 {
7398 	struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
7399 	struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
7400 						fpe_cfg);
7401 	unsigned long flags;
7402 	bool rearm = false;
7403 
7404 	spin_lock_irqsave(&fpe_cfg->lock, flags);
7405 
7406 	switch (fpe_cfg->status) {
7407 	case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
7408 	case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
7409 		if (fpe_cfg->verify_retries != 0) {
7410 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7411 						fpe_cfg, MPACKET_VERIFY);
7412 			rearm = true;
7413 		} else {
7414 			fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
7415 		}
7416 
7417 		fpe_cfg->verify_retries--;
7418 		break;
7419 
7420 	case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
7421 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7422 				     priv->plat->tx_queues_to_use,
7423 				     priv->plat->rx_queues_to_use,
7424 				     true, true);
7425 		break;
7426 
7427 	default:
7428 		break;
7429 	}
7430 
7431 	if (rearm) {
7432 		mod_timer(&fpe_cfg->verify_timer,
7433 			  jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
7434 	}
7435 
7436 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
7437 }
7438 
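/* Arm the verification timer only when the preemptible MAC and TX
 * preemption are enabled, verification is requested, and the handshake
 * has not already succeeded or failed.
 */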
7439 static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
7440 {
7441 	if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
7442 	    fpe_cfg->verify_enabled &&
7443 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
7444 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
7445 		timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
7446 		mod_timer(&fpe_cfg->verify_timer, jiffies);
7447 	}
7448 }
7449 
7450 void stmmac_fpe_apply(struct stmmac_priv *priv)
7451 {
7452 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
7453 
7454 	/* If verification is disabled, configure FPE right away.
7455 	 * Otherwise let the timer code do it.
7456 	 */
7457 	if (!fpe_cfg->verify_enabled) {
7458 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7459 				     priv->plat->tx_queues_to_use,
7460 				     priv->plat->rx_queues_to_use,
7461 				     fpe_cfg->tx_enabled,
7462 				     fpe_cfg->pmac_enabled);
7463 	} else {
7464 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
7465 		fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7466 
7467 		if (netif_running(priv->dev))
7468 			stmmac_fpe_verify_timer_arm(fpe_cfg);
7469 	}
7470 }
7471 
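/* XDP RX metadata hook: report the hardware receive timestamp of the
 * current frame, adjusted by the platform CDC error, or -ENODATA when RX
 * timestamping is disabled or no timestamp was captured.
 */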
7472 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7473 {
7474 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7475 	struct dma_desc *desc_contains_ts = ctx->desc;
7476 	struct stmmac_priv *priv = ctx->priv;
7477 	struct dma_desc *ndesc = ctx->ndesc;
7478 	struct dma_desc *desc = ctx->desc;
7479 	u64 ns = 0;
7480 
7481 	if (!priv->hwts_rx_en)
7482 		return -ENODATA;
7483 
7484 	/* For GMAC4, the valid timestamp comes from the next (context) descriptor. */
7485 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7486 		desc_contains_ts = ndesc;
7487 
7488 	/* Check if timestamp is available */
7489 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7490 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7491 		ns -= priv->plat->cdc_error_adj;
7492 		*timestamp = ns_to_ktime(ns);
7493 		return 0;
7494 	}
7495 
7496 	return -ENODATA;
7497 }
7498 
7499 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7500 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7501 };
7502 
7503 /**
7504  * stmmac_dvr_probe
7505  * @device: device pointer
7506  * @plat_dat: platform data pointer
7507  * @res: stmmac resource pointer
7508  * Description: this is the main probe function, used to allocate
7509  * the net_device (via alloc_etherdev) and the driver private structure.
7510  * Return:
7511  * returns 0 on success, otherwise errno.
7512  */
7513 int stmmac_dvr_probe(struct device *device,
7514 		     struct plat_stmmacenet_data *plat_dat,
7515 		     struct stmmac_resources *res)
7516 {
7517 	struct net_device *ndev = NULL;
7518 	struct stmmac_priv *priv;
7519 	u32 rxq;
7520 	int i, ret = 0;
7521 
7522 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7523 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7524 	if (!ndev)
7525 		return -ENOMEM;
7526 
7527 	SET_NETDEV_DEV(ndev, device);
7528 
7529 	priv = netdev_priv(ndev);
7530 	priv->device = device;
7531 	priv->dev = ndev;
7532 
7533 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7534 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7535 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7536 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7537 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7538 	}
7539 
7540 	priv->xstats.pcpu_stats =
7541 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7542 	if (!priv->xstats.pcpu_stats)
7543 		return -ENOMEM;
7544 
7545 	stmmac_set_ethtool_ops(ndev);
7546 	priv->pause = pause;
7547 	priv->plat = plat_dat;
7548 	priv->ioaddr = res->addr;
7549 	priv->dev->base_addr = (unsigned long)res->addr;
7550 	priv->plat->dma_cfg->multi_msi_en =
7551 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7552 
7553 	priv->dev->irq = res->irq;
7554 	priv->wol_irq = res->wol_irq;
7555 	priv->lpi_irq = res->lpi_irq;
7556 	priv->sfty_irq = res->sfty_irq;
7557 	priv->sfty_ce_irq = res->sfty_ce_irq;
7558 	priv->sfty_ue_irq = res->sfty_ue_irq;
7559 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7560 		priv->rx_irq[i] = res->rx_irq[i];
7561 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7562 		priv->tx_irq[i] = res->tx_irq[i];
7563 
7564 	if (!is_zero_ether_addr(res->mac))
7565 		eth_hw_addr_set(priv->dev, res->mac);
7566 
7567 	dev_set_drvdata(device, priv->dev);
7568 
7569 	/* Verify driver arguments */
7570 	stmmac_verify_args();
7571 
7572 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7573 	if (!priv->af_xdp_zc_qps)
7574 		return -ENOMEM;
7575 
7576 	/* Allocate workqueue */
7577 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7578 	if (!priv->wq) {
7579 		dev_err(priv->device, "failed to create workqueue\n");
7580 		ret = -ENOMEM;
7581 		goto error_wq_init;
7582 	}
7583 
7584 	INIT_WORK(&priv->service_task, stmmac_service_task);
7585 
7586 	/* Override with kernel parameters if supplied.
7587 	 * XXX: this needs to support multiple instances.
7588 	 */
7589 	if ((phyaddr >= 0) && (phyaddr <= 31))
7590 		priv->plat->phy_addr = phyaddr;
7591 
7592 	if (priv->plat->stmmac_rst) {
7593 		ret = reset_control_assert(priv->plat->stmmac_rst);
7594 		reset_control_deassert(priv->plat->stmmac_rst);
7595 		/* Some reset controllers have only reset callback instead of
7596 		 * assert + deassert callbacks pair.
7597 		 */
7598 		if (ret == -ENOTSUPP)
7599 			reset_control_reset(priv->plat->stmmac_rst);
7600 	}
7601 
7602 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7603 	if (ret == -ENOTSUPP)
7604 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7605 			ERR_PTR(ret));
7606 
7607 	/* Wait a bit for the reset to take effect */
7608 	udelay(10);
7609 
7610 	/* Init MAC and get the capabilities */
7611 	ret = stmmac_hw_init(priv);
7612 	if (ret)
7613 		goto error_hw_init;
7614 
7615 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7616 	 */
7617 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7618 		priv->plat->dma_cfg->dche = false;
7619 
7620 	stmmac_check_ether_addr(priv);
7621 
7622 	ndev->netdev_ops = &stmmac_netdev_ops;
7623 
7624 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7625 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7626 
7627 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7628 			    NETIF_F_RXCSUM;
7629 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7630 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7631 
7632 	ret = stmmac_tc_init(priv, priv);
7633 	if (!ret) {
7634 		ndev->hw_features |= NETIF_F_HW_TC;
7635 	}
7636 
7637 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7638 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7639 		if (priv->plat->has_gmac4)
7640 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7641 		priv->tso = true;
7642 		dev_info(priv->device, "TSO feature enabled\n");
7643 	}
7644 
7645 	if (priv->dma_cap.sphen &&
7646 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7647 		ndev->hw_features |= NETIF_F_GRO;
7648 		priv->sph_cap = true;
7649 		priv->sph = priv->sph_cap;
7650 		dev_info(priv->device, "SPH feature enabled\n");
7651 	}
7652 
7653 	/* Ideally our host DMA address width is the same as for the
7654 	 * device. However, it may differ and then we have to use our
7655 	 * host DMA width for allocation and the device DMA width for
7656 	 * register handling.
7657 	 */
7658 	if (priv->plat->host_dma_width)
7659 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7660 	else
7661 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7662 
7663 	if (priv->dma_cap.host_dma_width) {
7664 		ret = dma_set_mask_and_coherent(device,
7665 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7666 		if (!ret) {
7667 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7668 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7669 
7670 			/*
7671 			 * If more than 32 bits can be addressed, make sure to
7672 			 * enable enhanced addressing mode.
7673 			 */
7674 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7675 				priv->plat->dma_cfg->eame = true;
7676 		} else {
7677 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7678 			if (ret) {
7679 				dev_err(priv->device, "Failed to set DMA Mask\n");
7680 				goto error_hw_init;
7681 			}
7682 
7683 			priv->dma_cap.host_dma_width = 32;
7684 		}
7685 	}
7686 
7687 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7688 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7689 #ifdef STMMAC_VLAN_TAG_USED
7690 	/* Both mac100 and gmac support receive VLAN tag detection */
7691 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7692 	if (priv->plat->has_gmac4) {
7693 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7694 		priv->hw->hw_vlan_en = true;
7695 	}
7696 	if (priv->dma_cap.vlhash) {
7697 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7698 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7699 	}
7700 	if (priv->dma_cap.vlins) {
7701 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7702 		if (priv->dma_cap.dvlan)
7703 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7704 	}
7705 #endif
7706 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7707 
7708 	priv->xstats.threshold = tc;
7709 
7710 	/* Initialize RSS */
7711 	rxq = priv->plat->rx_queues_to_use;
7712 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7713 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7714 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7715 
7716 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7717 		ndev->features |= NETIF_F_RXHASH;
7718 
7719 	ndev->vlan_features |= ndev->features;
7720 
7721 	/* MTU range: 46 - hw-specific max */
7722 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7723 	if (priv->plat->has_xgmac)
7724 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7725 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7726 		ndev->max_mtu = JUMBO_LEN;
7727 	else
7728 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7729 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7730 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7731 	 */
7732 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7733 	    (priv->plat->maxmtu >= ndev->min_mtu))
7734 		ndev->max_mtu = priv->plat->maxmtu;
7735 	else if (priv->plat->maxmtu < ndev->min_mtu)
7736 		dev_warn(priv->device,
7737 			 "%s: warning: maxmtu having invalid value (%d)\n",
7738 			 __func__, priv->plat->maxmtu);
7739 
7740 	if (flow_ctrl)
7741 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7742 
7743 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7744 
7745 	/* Setup channels NAPI */
7746 	stmmac_napi_add(ndev);
7747 
7748 	mutex_init(&priv->lock);
7749 
7750 	priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7751 	priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
7752 	priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
7753 	timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
7754 	spin_lock_init(&priv->fpe_cfg.lock);
7755 
7756 	/* If a specific clk_csr value is passed from the platform,
7757 	 * the CSR Clock Range selection cannot be changed at run-time
7758 	 * and is fixed. Otherwise, the driver will try to set the MDC
7759 	 * clock dynamically, according to the actual csr clock
7760 	 * input.
7761 	 */
7762 	if (priv->plat->clk_csr >= 0)
7763 		priv->clk_csr = priv->plat->clk_csr;
7764 	else
7765 		stmmac_clk_csr_set(priv);
7766 
7767 	stmmac_check_pcs_mode(priv);
7768 
7769 	pm_runtime_get_noresume(device);
7770 	pm_runtime_set_active(device);
7771 	if (!pm_runtime_enabled(device))
7772 		pm_runtime_enable(device);
7773 
7774 	ret = stmmac_mdio_register(ndev);
7775 	if (ret < 0) {
7776 		dev_err_probe(priv->device, ret,
7777 			      "MDIO bus (id: %d) registration failed\n",
7778 			      priv->plat->bus_id);
7779 		goto error_mdio_register;
7780 	}
7781 
7782 	if (priv->plat->speed_mode_2500)
7783 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7784 
7785 	ret = stmmac_pcs_setup(ndev);
7786 	if (ret)
7787 		goto error_pcs_setup;
7788 
7789 	ret = stmmac_phy_setup(priv);
7790 	if (ret) {
7791 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7792 		goto error_phy_setup;
7793 	}
7794 
7795 	ret = register_netdev(ndev);
7796 	if (ret) {
7797 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7798 			__func__, ret);
7799 		goto error_netdev_register;
7800 	}
7801 
7802 #ifdef CONFIG_DEBUG_FS
7803 	stmmac_init_fs(ndev);
7804 #endif
7805 
7806 	if (priv->plat->dump_debug_regs)
7807 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7808 
7809 	/* Let pm_runtime_put() disable the clocks.
7810 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7811 	 */
7812 	pm_runtime_put(device);
7813 
7814 	return ret;
7815 
7816 error_netdev_register:
7817 	phylink_destroy(priv->phylink);
7818 error_phy_setup:
7819 	stmmac_pcs_clean(ndev);
7820 error_pcs_setup:
7821 	stmmac_mdio_unregister(ndev);
7822 error_mdio_register:
7823 	stmmac_napi_del(ndev);
7824 error_hw_init:
7825 	destroy_workqueue(priv->wq);
7826 error_wq_init:
7827 	bitmap_free(priv->af_xdp_zc_qps);
7828 
7829 	return ret;
7830 }
7831 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7832 
7833 /**
7834  * stmmac_dvr_remove
7835  * @dev: device pointer
7836  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7837  * changes the link status and releases the DMA descriptor rings.
7838  */
7839 void stmmac_dvr_remove(struct device *dev)
7840 {
7841 	struct net_device *ndev = dev_get_drvdata(dev);
7842 	struct stmmac_priv *priv = netdev_priv(ndev);
7843 
7844 	netdev_info(priv->dev, "%s: removing driver", __func__);
7845 
7846 	pm_runtime_get_sync(dev);
7847 
7848 	stmmac_stop_all_dma(priv);
7849 	stmmac_mac_set(priv, priv->ioaddr, false);
7850 	unregister_netdev(ndev);
7851 
7852 #ifdef CONFIG_DEBUG_FS
7853 	stmmac_exit_fs(ndev);
7854 #endif
7855 	phylink_destroy(priv->phylink);
7856 	if (priv->plat->stmmac_rst)
7857 		reset_control_assert(priv->plat->stmmac_rst);
7858 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7859 
7860 	stmmac_pcs_clean(ndev);
7861 	stmmac_mdio_unregister(ndev);
7862 
7863 	destroy_workqueue(priv->wq);
7864 	mutex_destroy(&priv->lock);
7865 	bitmap_free(priv->af_xdp_zc_qps);
7866 
7867 	pm_runtime_disable(dev);
7868 	pm_runtime_put_noidle(dev);
7869 }
7870 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7871 
7872 /**
7873  * stmmac_suspend - suspend callback
7874  * @dev: device pointer
7875  * Description: this function suspends the device. It is called by the
7876  * platform driver to stop the network queues, release the resources,
7877  * program the PMT register (for WoL) and clean up driver resources.
7878  */
7879 int stmmac_suspend(struct device *dev)
7880 {
7881 	struct net_device *ndev = dev_get_drvdata(dev);
7882 	struct stmmac_priv *priv = netdev_priv(ndev);
7883 	u32 chan;
7884 
7885 	if (!ndev || !netif_running(ndev))
7886 		return 0;
7887 
7888 	mutex_lock(&priv->lock);
7889 
7890 	netif_device_detach(ndev);
7891 
7892 	stmmac_disable_all_queues(priv);
7893 
7894 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7895 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7896 
7897 	if (priv->eee_enabled) {
7898 		priv->tx_path_in_lpi_mode = false;
7899 		del_timer_sync(&priv->eee_ctrl_timer);
7900 	}
7901 
7902 	/* Stop TX/RX DMA */
7903 	stmmac_stop_all_dma(priv);
7904 
7905 	if (priv->plat->serdes_powerdown)
7906 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7907 
7908 	/* Enable Power down mode by programming the PMT regs */
7909 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7910 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7911 		priv->irq_wake = 1;
7912 	} else {
7913 		stmmac_mac_set(priv, priv->ioaddr, false);
7914 		pinctrl_pm_select_sleep_state(priv->device);
7915 	}
7916 
7917 	mutex_unlock(&priv->lock);
7918 
7919 	rtnl_lock();
7920 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7921 		phylink_suspend(priv->phylink, true);
7922 	} else {
7923 		if (device_may_wakeup(priv->device))
7924 			phylink_speed_down(priv->phylink, false);
7925 		phylink_suspend(priv->phylink, false);
7926 	}
7927 	rtnl_unlock();
7928 
7929 	if (priv->dma_cap.fpesel)
7930 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7931 
7932 	priv->speed = SPEED_UNKNOWN;
7933 	return 0;
7934 }
7935 EXPORT_SYMBOL_GPL(stmmac_suspend);
7936 
7937 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7938 {
7939 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7940 
7941 	rx_q->cur_rx = 0;
7942 	rx_q->dirty_rx = 0;
7943 }
7944 
7945 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7946 {
7947 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7948 
7949 	tx_q->cur_tx = 0;
7950 	tx_q->dirty_tx = 0;
7951 	tx_q->mss = 0;
7952 
7953 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7954 }
7955 
7956 /**
7957  * stmmac_reset_queues_param - reset queue parameters
7958  * @priv: device pointer
7959  */
7960 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7961 {
7962 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7963 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7964 	u32 queue;
7965 
7966 	for (queue = 0; queue < rx_cnt; queue++)
7967 		stmmac_reset_rx_queue(priv, queue);
7968 
7969 	for (queue = 0; queue < tx_cnt; queue++)
7970 		stmmac_reset_tx_queue(priv, queue);
7971 }
7972 
7973 /**
7974  * stmmac_resume - resume callback
7975  * @dev: device pointer
7976  * Description: on resume, this function is invoked to set up the DMA and the
7977  * MAC core in a usable state.
7978  */
7979 int stmmac_resume(struct device *dev)
7980 {
7981 	struct net_device *ndev = dev_get_drvdata(dev);
7982 	struct stmmac_priv *priv = netdev_priv(ndev);
7983 	int ret;
7984 
7985 	if (!netif_running(ndev))
7986 		return 0;
7987 
7988 	/* The Power Down bit in the PMT register is cleared
7989 	 * automatically as soon as a magic packet or a Wake-up frame
7990 	 * is received. Nevertheless, it's better to manually clear
7991 	 * this bit because it can generate problems while resuming
7992 	 * from other devices (e.g. serial console).
7993 	 */
7994 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7995 		mutex_lock(&priv->lock);
7996 		stmmac_pmt(priv, priv->hw, 0);
7997 		mutex_unlock(&priv->lock);
7998 		priv->irq_wake = 0;
7999 	} else {
8000 		pinctrl_pm_select_default_state(priv->device);
8001 		/* reset the phy so that it's ready */
8002 		if (priv->mii)
8003 			stmmac_mdio_reset(priv->mii);
8004 	}
8005 
8006 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
8007 	    priv->plat->serdes_powerup) {
8008 		ret = priv->plat->serdes_powerup(ndev,
8009 						 priv->plat->bsp_priv);
8010 
8011 		if (ret < 0)
8012 			return ret;
8013 	}
8014 
8015 	rtnl_lock();
8016 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8017 		phylink_resume(priv->phylink);
8018 	} else {
8019 		phylink_resume(priv->phylink);
8020 		if (device_may_wakeup(priv->device))
8021 			phylink_speed_up(priv->phylink);
8022 	}
8023 	rtnl_unlock();
8024 
8025 	rtnl_lock();
8026 	mutex_lock(&priv->lock);
8027 
8028 	stmmac_reset_queues_param(priv);
8029 
8030 	stmmac_free_tx_skbufs(priv);
8031 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8032 
8033 	stmmac_hw_setup(ndev, false);
8034 	stmmac_init_coalesce(priv);
8035 	stmmac_set_rx_mode(ndev);
8036 
8037 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8038 
8039 	stmmac_enable_all_queues(priv);
8040 	stmmac_enable_all_dma_irq(priv);
8041 
8042 	mutex_unlock(&priv->lock);
8043 	rtnl_unlock();
8044 
8045 	netif_device_attach(ndev);
8046 
8047 	return 0;
8048 }
8049 EXPORT_SYMBOL_GPL(stmmac_resume);
8050 
8051 #ifndef MODULE
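/* Built-in (non-module) configuration via the kernel command line, parsed
 * below with strsep(). For example (the values are illustrative only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000,tc:256
 */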
8052 static int __init stmmac_cmdline_opt(char *str)
8053 {
8054 	char *opt;
8055 
8056 	if (!str || !*str)
8057 		return 1;
8058 	while ((opt = strsep(&str, ",")) != NULL) {
8059 		if (!strncmp(opt, "debug:", 6)) {
8060 			if (kstrtoint(opt + 6, 0, &debug))
8061 				goto err;
8062 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8063 			if (kstrtoint(opt + 8, 0, &phyaddr))
8064 				goto err;
8065 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8066 			if (kstrtoint(opt + 7, 0, &buf_sz))
8067 				goto err;
8068 		} else if (!strncmp(opt, "tc:", 3)) {
8069 			if (kstrtoint(opt + 3, 0, &tc))
8070 				goto err;
8071 		} else if (!strncmp(opt, "watchdog:", 9)) {
8072 			if (kstrtoint(opt + 9, 0, &watchdog))
8073 				goto err;
8074 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8075 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8076 				goto err;
8077 		} else if (!strncmp(opt, "pause:", 6)) {
8078 			if (kstrtoint(opt + 6, 0, &pause))
8079 				goto err;
8080 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8081 			if (kstrtoint(opt + 10, 0, &eee_timer))
8082 				goto err;
8083 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8084 			if (kstrtoint(opt + 11, 0, &chain_mode))
8085 				goto err;
8086 		}
8087 	}
8088 	return 1;
8089 
8090 err:
8091 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8092 	return 1;
8093 }
8094 
8095 __setup("stmmaceth=", stmmac_cmdline_opt);
8096 #endif /* MODULE */
8097 
8098 static int __init stmmac_init(void)
8099 {
8100 #ifdef CONFIG_DEBUG_FS
8101 	/* Create debugfs main directory if it doesn't exist yet */
8102 	if (!stmmac_fs_dir)
8103 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8104 	register_netdevice_notifier(&stmmac_notifier);
8105 #endif
8106 
8107 	return 0;
8108 }
8109 
8110 static void __exit stmmac_exit(void)
8111 {
8112 #ifdef CONFIG_DEBUG_FS
8113 	unregister_netdevice_notifier(&stmmac_notifier);
8114 	debugfs_remove_recursive(stmmac_fs_dir);
8115 #endif
8116 }
8117 
8118 module_init(stmmac_init)
8119 module_exit(stmmac_exit)
8120 
8121 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8122 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8123 MODULE_LICENSE("GPL");
8124