// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
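/* Illustrative: with SMP_CACHE_BYTES == 64, STMMAC_ALIGN(1500) rounds up to
 * 1536 for the cache line; the outer 16-byte ALIGN is then a no-op since 64
 * is already a multiple of 16.
 */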
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force use of the chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
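
/* These knobs can be set at module load time or on the kernel command line,
 * e.g. (illustrative): "modprobe stmmac chain_mode=1", or booting with
 * "stmmac.eee_timer=2000".
 */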

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
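
/* Illustrative: a 75 MHz csr clock falls in the 60-100 MHz range, selecting
 * STMMAC_CSR_60_100M; with the usual Synopsys divider of 42 for that range,
 * MDC runs at roughly 1.8 MHz, below the 2.5 MHz cap of IEEE 802.3.
 */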

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
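
/* Illustrative ring arithmetic: with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5, stmmac_tx_avail() returns 512 - 10 + 5 - 1 = 506; the
 * extra "- 1" keeps one slot permanently free so cur_tx can never catch
 * up with dirty_tx on a full ring.
 */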

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack, performing some sanity checks along the way.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
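
/* Userspace reaches stmmac_hwtstamp_set() through the SIOCSHWTSTAMP ioctl.
 * A minimal sketch (illustrative; "eth0" and fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// fd: any AF_INET socket
 */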

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize the hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * It will be rerun after resuming from suspend, in which case the
 * timestamping flags updated by stmmac_hwtstamp_set() also need to be
 * restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is:
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
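	/* Worked example (illustrative): with sec_inc = 8 ns,
	 * temp = 1e9 / 8 = 125000000; with clk_ptp_rate = 250 MHz the
	 * addend becomes (125000000 << 32) / 250000000 = 2^31, so the
	 * 32-bit accumulator wraps every second 4 ns PTP clock cycle,
	 * advancing the system time by sec_inc (8 ns) per wrap.
	 */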
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS), an interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
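
/* Illustrative: an MTU of 3000 is >= BUF_SIZE_2KiB (2048) but < BUF_SIZE_4KiB
 * (4096), so stmmac_set_bfsize() returns BUF_SIZE_4KiB.
 */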

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == priv->dma_rx_size - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == priv->dma_rx_size - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
		return -ENOMEM;

	if (priv->sph) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int queue;
	int i;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		stmmac_clear_rx_descriptors(priv, queue);

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 0);
		}
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = priv->dma_rx_size;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 1);
			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
				stmmac_mode_init(priv, tx_q->dma_tx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 0);
		}

		for (i = 0; i < priv->dma_tx_size; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
				p = &((tx_q->dma_entx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}

1628 /**
1629  * free_dma_rx_desc_resources - free RX dma desc resources
1630  * @priv: private structure
1631  */
free_dma_rx_desc_resources(struct stmmac_priv * priv)1632 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1633 {
1634 	u32 rx_count = priv->plat->rx_queues_to_use;
1635 	u32 queue;
1636 
1637 	/* Free RX queue resources */
1638 	for (queue = 0; queue < rx_count; queue++) {
1639 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1640 
1641 		/* Release the DMA RX socket buffers */
1642 		dma_free_rx_skbufs(priv, queue);
1643 
1644 		/* Free DMA regions of consistent memory previously allocated */
1645 		if (!priv->extend_desc)
1646 			dma_free_coherent(priv->device, priv->dma_rx_size *
1647 					  sizeof(struct dma_desc),
1648 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1649 		else
1650 			dma_free_coherent(priv->device, priv->dma_rx_size *
1651 					  sizeof(struct dma_extended_desc),
1652 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1653 
1654 		kfree(rx_q->buf_pool);
1655 		if (rx_q->page_pool)
1656 			page_pool_destroy(rx_q->page_pool);
1657 	}
1658 }
1659 
1660 /**
1661  * free_dma_tx_desc_resources - free TX dma desc resources
1662  * @priv: private structure
1663  */
1664 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1665 {
1666 	u32 tx_count = priv->plat->tx_queues_to_use;
1667 	u32 queue;
1668 
1669 	/* Free TX queue resources */
1670 	for (queue = 0; queue < tx_count; queue++) {
1671 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1672 		size_t size;
1673 		void *addr;
1674 
1675 		/* Release the DMA TX socket buffers */
1676 		dma_free_tx_skbufs(priv, queue);
1677 
1678 		if (priv->extend_desc) {
1679 			size = sizeof(struct dma_extended_desc);
1680 			addr = tx_q->dma_etx;
1681 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1682 			size = sizeof(struct dma_edesc);
1683 			addr = tx_q->dma_entx;
1684 		} else {
1685 			size = sizeof(struct dma_desc);
1686 			addr = tx_q->dma_tx;
1687 		}
1688 
1689 		size *= priv->dma_tx_size;
1690 
1691 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1692 
1693 		kfree(tx_q->tx_skbuff_dma);
1694 		kfree(tx_q->tx_skbuff);
1695 	}
1696 }
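
/*
 * A minimal sketch of the descriptor-size selection used above and in the
 * TX alloc/clean paths (hypothetical helper, not part of the driver):
 * extended descriptors take precedence, then TBS enhanced descriptors,
 * then the basic layout.
 */
static size_t tx_desc_size_sketch(bool extend_desc, bool tbs_avail)
{
	if (extend_desc)
		return sizeof(struct dma_extended_desc);
	if (tbs_avail)
		return sizeof(struct dma_edesc);
	return sizeof(struct dma_desc);
}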
1697 
1698 /**
1699  * alloc_dma_rx_desc_resources - alloc RX resources.
1700  * @priv: private structure
1701  * Description: according to which descriptor can be used (extended or
1702  * basic) this function allocates the resources for the RX path. It
1703  * pre-allocates the RX socket buffers (via a page pool) in order to
1704  * allow a zero-copy mechanism.
1705  */
1706 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1707 {
1708 	u32 rx_count = priv->plat->rx_queues_to_use;
1709 	int ret = -ENOMEM;
1710 	u32 queue;
1711 
1712 	/* RX queues buffers and DMA */
1713 	for (queue = 0; queue < rx_count; queue++) {
1714 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1715 		struct page_pool_params pp_params = { 0 };
1716 		unsigned int num_pages;
1717 
1718 		rx_q->queue_index = queue;
1719 		rx_q->priv_data = priv;
1720 
1721 		pp_params.flags = PP_FLAG_DMA_MAP;
1722 		pp_params.pool_size = priv->dma_rx_size;
1723 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1724 		pp_params.order = ilog2(num_pages);
1725 		pp_params.nid = dev_to_node(priv->device);
1726 		pp_params.dev = priv->device;
1727 		pp_params.dma_dir = DMA_FROM_DEVICE;
1728 
1729 		rx_q->page_pool = page_pool_create(&pp_params);
1730 		if (IS_ERR(rx_q->page_pool)) {
1731 			ret = PTR_ERR(rx_q->page_pool);
1732 			rx_q->page_pool = NULL;
1733 			goto err_dma;
1734 		}
1735 
1736 		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1737 					 sizeof(*rx_q->buf_pool),
1738 					 GFP_KERNEL);
1739 		if (!rx_q->buf_pool)
1740 			goto err_dma;
1741 
1742 		if (priv->extend_desc) {
1743 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1744 							   priv->dma_rx_size *
1745 							   sizeof(struct dma_extended_desc),
1746 							   &rx_q->dma_rx_phy,
1747 							   GFP_KERNEL);
1748 			if (!rx_q->dma_erx)
1749 				goto err_dma;
1750 
1751 		} else {
1752 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1753 							  priv->dma_rx_size *
1754 							  sizeof(struct dma_desc),
1755 							  &rx_q->dma_rx_phy,
1756 							  GFP_KERNEL);
1757 			if (!rx_q->dma_rx)
1758 				goto err_dma;
1759 		}
1760 	}
1761 
1762 	return 0;
1763 
1764 err_dma:
1765 	free_dma_rx_desc_resources(priv);
1766 
1767 	return ret;
1768 }
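
/*
 * Worked sketch of the page_pool sizing above (hypothetical helper, not
 * driver code; assumes 4 KiB pages): num_pages is the buffer size rounded
 * up to whole pages, and the order is its floor(log2). With the default
 * 1536-byte buffer this yields one page per buffer, i.e. order 0.
 */
static unsigned int rx_pool_order_sketch(unsigned int buf_sz,
					 unsigned int page_sz)
{
	unsigned int num_pages = (buf_sz + page_sz - 1) / page_sz;
	unsigned int order = 0;

	while ((2U << order) <= num_pages)	/* ilog2(): floor of log2 */
		order++;
	return order;
}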
1769 
1770 /**
1771  * alloc_dma_tx_desc_resources - alloc TX resources.
1772  * @priv: private structure
1773  * Description: according to which descriptor can be used (extended or
1774  * basic) this function allocates the resources for the TX path: the
1775  * DMA descriptor ring plus the tx_skbuff and tx_skbuff_dma bookkeeping
1776  * arrays.
1777  */
1778 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1779 {
1780 	u32 tx_count = priv->plat->tx_queues_to_use;
1781 	int ret = -ENOMEM;
1782 	u32 queue;
1783 
1784 	/* TX queues buffers and DMA */
1785 	for (queue = 0; queue < tx_count; queue++) {
1786 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1787 		size_t size;
1788 		void *addr;
1789 
1790 		tx_q->queue_index = queue;
1791 		tx_q->priv_data = priv;
1792 
1793 		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1794 					      sizeof(*tx_q->tx_skbuff_dma),
1795 					      GFP_KERNEL);
1796 		if (!tx_q->tx_skbuff_dma)
1797 			goto err_dma;
1798 
1799 		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1800 					  sizeof(struct sk_buff *),
1801 					  GFP_KERNEL);
1802 		if (!tx_q->tx_skbuff)
1803 			goto err_dma;
1804 
1805 		if (priv->extend_desc)
1806 			size = sizeof(struct dma_extended_desc);
1807 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1808 			size = sizeof(struct dma_edesc);
1809 		else
1810 			size = sizeof(struct dma_desc);
1811 
1812 		size *= priv->dma_tx_size;
1813 
1814 		addr = dma_alloc_coherent(priv->device, size,
1815 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1816 		if (!addr)
1817 			goto err_dma;
1818 
1819 		if (priv->extend_desc)
1820 			tx_q->dma_etx = addr;
1821 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1822 			tx_q->dma_entx = addr;
1823 		else
1824 			tx_q->dma_tx = addr;
1825 	}
1826 
1827 	return 0;
1828 
1829 err_dma:
1830 	free_dma_tx_desc_resources(priv);
1831 	return ret;
1832 }
1833 
1834 /**
1835  * alloc_dma_desc_resources - alloc TX/RX resources.
1836  * @priv: private structure
1837  * Description: according to which descriptor can be used (extended or
1838  * basic) this function allocates the resources for the TX and RX paths.
1839  * For reception, for example, it pre-allocates the RX socket buffers in
1840  * order to allow a zero-copy mechanism.
1841  */
1842 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1843 {
1844 	/* RX Allocation */
1845 	int ret = alloc_dma_rx_desc_resources(priv);
1846 
1847 	if (ret)
1848 		return ret;
1849 
1850 	ret = alloc_dma_tx_desc_resources(priv);
1851 
1852 	return ret;
1853 }
1854 
1855 /**
1856  * free_dma_desc_resources - free dma desc resources
1857  * @priv: private structure
1858  */
1859 static void free_dma_desc_resources(struct stmmac_priv *priv)
1860 {
1861 	/* Release the DMA RX socket buffers */
1862 	free_dma_rx_desc_resources(priv);
1863 
1864 	/* Release the DMA TX socket buffers */
1865 	free_dma_tx_desc_resources(priv);
1866 }
1867 
1868 /**
1869  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1870  *  @priv: driver private structure
1871  *  Description: It is used for enabling the rx queues in the MAC
1872  */
1873 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1874 {
1875 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1876 	int queue;
1877 	u8 mode;
1878 
1879 	for (queue = 0; queue < rx_queues_count; queue++) {
1880 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1881 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1882 	}
1883 }
1884 
1885 /**
1886  * stmmac_start_rx_dma - start RX DMA channel
1887  * @priv: driver private structure
1888  * @chan: RX channel index
1889  * Description:
1890  * This starts a RX DMA channel
1891  */
1892 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1893 {
1894 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1895 	stmmac_start_rx(priv, priv->ioaddr, chan);
1896 }
1897 
1898 /**
1899  * stmmac_start_tx_dma - start TX DMA channel
1900  * @priv: driver private structure
1901  * @chan: TX channel index
1902  * Description:
1903  * This starts a TX DMA channel
1904  */
1905 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1906 {
1907 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1908 	stmmac_start_tx(priv, priv->ioaddr, chan);
1909 }
1910 
1911 /**
1912  * stmmac_stop_rx_dma - stop RX DMA channel
1913  * @priv: driver private structure
1914  * @chan: RX channel index
1915  * Description:
1916  * This stops a RX DMA channel
1917  */
1918 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1919 {
1920 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1921 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1922 }
1923 
1924 /**
1925  * stmmac_stop_tx_dma - stop TX DMA channel
1926  * @priv: driver private structure
1927  * @chan: TX channel index
1928  * Description:
1929  * This stops a TX DMA channel
1930  */
1931 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1932 {
1933 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1934 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1935 }
1936 
1937 /**
1938  * stmmac_start_all_dma - start all RX and TX DMA channels
1939  * @priv: driver private structure
1940  * Description:
1941  * This starts all the RX and TX DMA channels
1942  */
1943 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1944 {
1945 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1946 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1947 	u32 chan = 0;
1948 
1949 	for (chan = 0; chan < rx_channels_count; chan++)
1950 		stmmac_start_rx_dma(priv, chan);
1951 
1952 	for (chan = 0; chan < tx_channels_count; chan++)
1953 		stmmac_start_tx_dma(priv, chan);
1954 }
1955 
1956 /**
1957  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1958  * @priv: driver private structure
1959  * Description:
1960  * This stops the RX and TX DMA channels
1961  */
1962 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1963 {
1964 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1965 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1966 	u32 chan = 0;
1967 
1968 	for (chan = 0; chan < rx_channels_count; chan++)
1969 		stmmac_stop_rx_dma(priv, chan);
1970 
1971 	for (chan = 0; chan < tx_channels_count; chan++)
1972 		stmmac_stop_tx_dma(priv, chan);
1973 }
1974 
1975 /**
1976  *  stmmac_dma_operation_mode - HW DMA operation mode
1977  *  @priv: driver private structure
1978  *  Description: it is used for configuring the DMA operation mode register in
1979  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1980  */
1981 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1982 {
1983 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1984 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1985 	int rxfifosz = priv->plat->rx_fifo_size;
1986 	int txfifosz = priv->plat->tx_fifo_size;
1987 	u32 txmode = 0;
1988 	u32 rxmode = 0;
1989 	u32 chan = 0;
1990 	u8 qmode = 0;
1991 
1992 	if (rxfifosz == 0)
1993 		rxfifosz = priv->dma_cap.rx_fifo_size;
1994 	if (txfifosz == 0)
1995 		txfifosz = priv->dma_cap.tx_fifo_size;
1996 
1997 	/* Adjust for real per queue fifo size */
1998 	rxfifosz /= rx_channels_count;
1999 	txfifosz /= tx_channels_count;
2000 
2001 	if (priv->plat->force_thresh_dma_mode) {
2002 		txmode = tc;
2003 		rxmode = tc;
2004 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2005 		/*
2006 		 * In case of GMAC, SF mode can be enabled
2007 		 * to perform the TX COE in HW. This depends on:
2008 		 * 1) TX COE being actually supported;
2009 		 * 2) there being no buggy Jumbo frame support
2010 		 *    that requires not inserting the csum in the TDES.
2011 		 */
2012 		txmode = SF_DMA_MODE;
2013 		rxmode = SF_DMA_MODE;
2014 		priv->xstats.threshold = SF_DMA_MODE;
2015 	} else {
2016 		txmode = tc;
2017 		rxmode = SF_DMA_MODE;
2018 	}
2019 
2020 	/* configure all channels */
2021 	for (chan = 0; chan < rx_channels_count; chan++) {
2022 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2023 
2024 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2025 				rxfifosz, qmode);
2026 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
2027 				chan);
2028 	}
2029 
2030 	for (chan = 0; chan < tx_channels_count; chan++) {
2031 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2032 
2033 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2034 				txfifosz, qmode);
2035 	}
2036 }
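
/*
 * Worked example of the per-queue FIFO split above (illustrative numbers,
 * not from any databook): a 16 KiB RX FIFO shared by 4 RX queues leaves
 * 4 KiB per queue, and that per-queue value is what gets programmed.
 */
static int per_queue_fifo_sketch(int fifosz, u32 nqueues)
{
	return fifosz / (int)nqueues;	/* e.g. 16384 / 4 = 4096 */
}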
2037 
2038 /**
2039  * stmmac_tx_clean - to manage the transmission completion
2040  * @priv: driver private structure
2041  * @budget: napi budget limiting this function's packet handling
2042  * @queue: TX queue index
2043  * Description: it reclaims the transmit resources after transmission completes.
2044  */
2045 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2046 {
2047 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2048 	unsigned int bytes_compl = 0, pkts_compl = 0;
2049 	unsigned int entry, count = 0;
2050 
2051 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2052 
2053 	priv->xstats.tx_clean++;
2054 
2055 	entry = tx_q->dirty_tx;
2056 	while ((entry != tx_q->cur_tx) && (count < budget)) {
2057 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
2058 		struct dma_desc *p;
2059 		int status;
2060 
2061 		if (priv->extend_desc)
2062 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2063 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2064 			p = &tx_q->dma_entx[entry].basic;
2065 		else
2066 			p = tx_q->dma_tx + entry;
2067 
2068 		status = stmmac_tx_status(priv, &priv->dev->stats,
2069 				&priv->xstats, p, priv->ioaddr);
2070 		/* Check if the descriptor is owned by the DMA */
2071 		if (unlikely(status & tx_dma_own))
2072 			break;
2073 
2074 		count++;
2075 
2076 		/* Make sure descriptor fields are read after reading
2077 		 * the own bit.
2078 		 */
2079 		dma_rmb();
2080 
2081 		/* Just consider the last segment and ...*/
2082 		if (likely(!(status & tx_not_ls))) {
2083 			/* ... verify the status error condition */
2084 			if (unlikely(status & tx_err)) {
2085 				priv->dev->stats.tx_errors++;
2086 			} else {
2087 				priv->dev->stats.tx_packets++;
2088 				priv->xstats.tx_pkt_n++;
2089 			}
2090 			stmmac_get_tx_hwtstamp(priv, p, skb);
2091 		}
2092 
2093 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2094 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2095 				dma_unmap_page(priv->device,
2096 					       tx_q->tx_skbuff_dma[entry].buf,
2097 					       tx_q->tx_skbuff_dma[entry].len,
2098 					       DMA_TO_DEVICE);
2099 			else
2100 				dma_unmap_single(priv->device,
2101 						 tx_q->tx_skbuff_dma[entry].buf,
2102 						 tx_q->tx_skbuff_dma[entry].len,
2103 						 DMA_TO_DEVICE);
2104 			tx_q->tx_skbuff_dma[entry].buf = 0;
2105 			tx_q->tx_skbuff_dma[entry].len = 0;
2106 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2107 		}
2108 
2109 		stmmac_clean_desc3(priv, tx_q, p);
2110 
2111 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2112 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2113 
2114 		if (likely(skb != NULL)) {
2115 			pkts_compl++;
2116 			bytes_compl += skb->len;
2117 			dev_consume_skb_any(skb);
2118 			tx_q->tx_skbuff[entry] = NULL;
2119 		}
2120 
2121 		stmmac_release_tx_desc(priv, p, priv->mode);
2122 
2123 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2124 	}
2125 	tx_q->dirty_tx = entry;
2126 
2127 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2128 				  pkts_compl, bytes_compl);
2129 
2130 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2131 								queue))) &&
2132 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2133 
2134 		netif_dbg(priv, tx_done, priv->dev,
2135 			  "%s: restart transmit\n", __func__);
2136 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2137 	}
2138 
2139 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2140 		stmmac_enable_eee_mode(priv);
2141 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2142 	}
2143 
2144 	/* We still have pending packets, let's call for a new scheduling */
2145 	if (tx_q->dirty_tx != tx_q->cur_tx)
2146 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2147 
2148 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2149 
2150 	return count;
2151 }
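
/*
 * Sketch of the ring bookkeeping used by the cleaner above (hypothetical
 * helper; assumes a power-of-two ring size, which is what the masked
 * STMMAC_GET_ENTRY() wraparound relies on): dirty_tx chases cur_tx, and
 * the distance between them is the number of descriptors not yet
 * reclaimed.
 */
static u32 tx_pending_sketch(u32 dirty_tx, u32 cur_tx, u32 ring_size)
{
	/* e.g. dirty = 510, cur = 2, size = 512 -> 4 entries in flight */
	return (cur_tx - dirty_tx) & (ring_size - 1);
}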
2152 
2153 /**
2154  * stmmac_tx_err - to manage the tx error
2155  * @priv: driver private structure
2156  * @chan: channel index
2157  * Description: it cleans the descriptors and restarts the transmission
2158  * in case of transmission errors.
2159  */
2160 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2161 {
2162 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2163 
2164 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2165 
2166 	stmmac_stop_tx_dma(priv, chan);
2167 	dma_free_tx_skbufs(priv, chan);
2168 	stmmac_clear_tx_descriptors(priv, chan);
2169 	tx_q->dirty_tx = 0;
2170 	tx_q->cur_tx = 0;
2171 	tx_q->mss = 0;
2172 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2173 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2174 			    tx_q->dma_tx_phy, chan);
2175 	stmmac_start_tx_dma(priv, chan);
2176 
2177 	priv->dev->stats.tx_errors++;
2178 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2179 }
2180 
2181 /**
2182  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2183  *  @priv: driver private structure
2184  *  @txmode: TX operating mode
2185  *  @rxmode: RX operating mode
2186  *  @chan: channel index
2187  *  Description: it is used for configuring the DMA operation mode at
2188  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2189  *  mode.
2190  */
2191 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2192 					  u32 rxmode, u32 chan)
2193 {
2194 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2195 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2196 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2197 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2198 	int rxfifosz = priv->plat->rx_fifo_size;
2199 	int txfifosz = priv->plat->tx_fifo_size;
2200 
2201 	if (rxfifosz == 0)
2202 		rxfifosz = priv->dma_cap.rx_fifo_size;
2203 	if (txfifosz == 0)
2204 		txfifosz = priv->dma_cap.tx_fifo_size;
2205 
2206 	/* Adjust for real per queue fifo size */
2207 	rxfifosz /= rx_channels_count;
2208 	txfifosz /= tx_channels_count;
2209 
2210 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2211 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2212 }
2213 
2214 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2215 {
2216 	int ret;
2217 
2218 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2219 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2220 	if (ret && (ret != -EINVAL)) {
2221 		stmmac_global_err(priv);
2222 		return true;
2223 	}
2224 
2225 	return false;
2226 }
2227 
2228 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2229 {
2230 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2231 						 &priv->xstats, chan);
2232 	struct stmmac_channel *ch = &priv->channel[chan];
2233 	unsigned long flags;
2234 
2235 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2236 		if (napi_schedule_prep(&ch->rx_napi)) {
2237 			spin_lock_irqsave(&ch->lock, flags);
2238 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2239 			spin_unlock_irqrestore(&ch->lock, flags);
2240 			__napi_schedule(&ch->rx_napi);
2241 		}
2242 	}
2243 
2244 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2245 		if (napi_schedule_prep(&ch->tx_napi)) {
2246 			spin_lock_irqsave(&ch->lock, flags);
2247 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2248 			spin_unlock_irqrestore(&ch->lock, flags);
2249 			__napi_schedule(&ch->tx_napi);
2250 		}
2251 	}
2252 
2253 	return status;
2254 }
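
/*
 * The scheduling pattern above, condensed into one sketch (hypothetical
 * helper, not used by the driver): the per-channel DMA interrupt is
 * masked under ch->lock *before* __napi_schedule(), so the line cannot
 * re-fire while the poll routine is pending; the poll routine re-enables
 * it on completion.
 */
static void napi_schedule_masked_sketch(struct stmmac_priv *priv,
					struct stmmac_channel *ch,
					struct napi_struct *napi,
					int rx, int tx)
{
	unsigned long flags;

	if (!napi_schedule_prep(napi))
		return;	/* already scheduled: the IRQ is masked already */

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, rx, tx);
	spin_unlock_irqrestore(&ch->lock, flags);
	__napi_schedule(napi);
}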
2255 
2256 /**
2257  * stmmac_dma_interrupt - DMA ISR
2258  * @priv: driver private structure
2259  * Description: this is the DMA ISR. It is called by the main ISR.
2260  * It calls the dwmac dma routine and schedules the poll method when
2261  * there is work that can be done.
2262  */
2263 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2264 {
2265 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2266 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2267 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2268 				tx_channel_count : rx_channel_count;
2269 	u32 chan;
2270 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2271 
2272 	/* Make sure we never check beyond our status buffer. */
2273 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2274 		channels_to_check = ARRAY_SIZE(status);
2275 
2276 	for (chan = 0; chan < channels_to_check; chan++)
2277 		status[chan] = stmmac_napi_check(priv, chan);
2278 
2279 	for (chan = 0; chan < tx_channel_count; chan++) {
2280 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2281 			/* Try to bump up the dma threshold on this failure */
2282 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2283 			    (tc <= 256)) {
2284 				tc += 64;
2285 				if (priv->plat->force_thresh_dma_mode)
2286 					stmmac_set_dma_operation_mode(priv,
2287 								      tc,
2288 								      tc,
2289 								      chan);
2290 				else
2291 					stmmac_set_dma_operation_mode(priv,
2292 								    tc,
2293 								    SF_DMA_MODE,
2294 								    chan);
2295 				priv->xstats.threshold = tc;
2296 			}
2297 		} else if (unlikely(status[chan] == tx_hard_error)) {
2298 			stmmac_tx_err(priv, chan);
2299 		}
2300 	}
2301 }
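
/*
 * Sketch of the threshold escalation above (hypothetical helper): the
 * operating threshold grows in steps of 64 for as long as its current
 * value is at or below 256, e.g. 64 -> 128 -> 192 -> 256.
 */
static int bump_tx_threshold_sketch(int cur_tc)
{
	if (cur_tc <= 256)
		cur_tc += 64;
	return cur_tc;
}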
2302 
2303 /**
2304  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2305  * @priv: driver private structure
2306  * Description: this masks the MMC irq, since the counters are managed in SW.
2307  */
2308 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2309 {
2310 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2311 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2312 
2313 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2314 
2315 	if (priv->dma_cap.rmon) {
2316 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2317 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2318 	} else
2319 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2320 }
2321 
2322 /**
2323  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2324  * @priv: driver private structure
2325  * Description:
2326  *  newer GMAC chip generations have a register that indicates the
2327  *  presence of the optional features/functions.
2328  *  It can also be used to override the value passed through the
2329  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2330  */
2331 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2332 {
2333 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2334 }
2335 
2336 /**
2337  * stmmac_check_ether_addr - check if the MAC addr is valid
2338  * @priv: driver private structure
2339  * Description:
2340  * it verifies that the MAC address is valid; if it is not, it
2341  * generates a random MAC address
2342  */
2343 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2344 {
2345 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2346 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2347 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2348 			eth_hw_addr_random(priv->dev);
2349 		dev_info(priv->device, "device MAC address %pM\n",
2350 			 priv->dev->dev_addr);
2351 	}
2352 }
2353 
2354 /**
2355  * stmmac_init_dma_engine - DMA init.
2356  * @priv: driver private structure
2357  * Description:
2358  * It inits the DMA by invoking the specific MAC/GMAC callback.
2359  * Some DMA parameters can be passed from the platform;
2360  * if they are not passed, a default is kept for the MAC or GMAC.
2361  */
2362 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2363 {
2364 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2365 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2366 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2367 	struct stmmac_rx_queue *rx_q;
2368 	struct stmmac_tx_queue *tx_q;
2369 	u32 chan = 0;
2370 	int atds = 0;
2371 	int ret = 0;
2372 
2373 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2374 		dev_err(priv->device, "Invalid DMA configuration\n");
2375 		return -EINVAL;
2376 	}
2377 
2378 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2379 		atds = 1;
2380 
2381 	ret = stmmac_reset(priv, priv->ioaddr);
2382 	if (ret) {
2383 		dev_err(priv->device, "Failed to reset the dma\n");
2384 		return ret;
2385 	}
2386 
2387 	/* DMA Configuration */
2388 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2389 
2390 	if (priv->plat->axi)
2391 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2392 
2393 	/* DMA CSR Channel configuration */
2394 	for (chan = 0; chan < dma_csr_ch; chan++)
2395 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2396 
2397 	/* DMA RX Channel Configuration */
2398 	for (chan = 0; chan < rx_channels_count; chan++) {
2399 		rx_q = &priv->rx_queue[chan];
2400 
2401 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2402 				    rx_q->dma_rx_phy, chan);
2403 
2404 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2405 				     (priv->dma_rx_size *
2406 				      sizeof(struct dma_desc));
2407 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2408 				       rx_q->rx_tail_addr, chan);
2409 	}
2410 
2411 	/* DMA TX Channel Configuration */
2412 	for (chan = 0; chan < tx_channels_count; chan++) {
2413 		tx_q = &priv->tx_queue[chan];
2414 
2415 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2416 				    tx_q->dma_tx_phy, chan);
2417 
2418 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2419 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2420 				       tx_q->tx_tail_addr, chan);
2421 	}
2422 
2423 	return ret;
2424 }
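
/*
 * Worked example of the RX tail address programmed above (illustrative
 * arithmetic; assumes a 512-entry ring and the 16-byte basic descriptor):
 * the tail pointer is set one descriptor past the end of the ring, i.e.
 * dma_rx_phy + 512 * 16 = dma_rx_phy + 8 KiB.
 */
static dma_addr_t rx_tail_addr_sketch(dma_addr_t dma_rx_phy, u32 ring_size)
{
	return dma_rx_phy + ring_size * sizeof(struct dma_desc);
}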
2425 
2426 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2427 {
2428 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2429 
2430 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2431 }
2432 
2433 /**
2434  * stmmac_tx_timer - mitigation sw timer for tx.
2435  * @t: data pointer
2436  * Description:
2437  * This is the timer handler that schedules the TX NAPI poll running stmmac_tx_clean.
2438  */
2439 static void stmmac_tx_timer(struct timer_list *t)
2440 {
2441 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2442 	struct stmmac_priv *priv = tx_q->priv_data;
2443 	struct stmmac_channel *ch;
2444 
2445 	ch = &priv->channel[tx_q->queue_index];
2446 
2447 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2448 		unsigned long flags;
2449 
2450 		spin_lock_irqsave(&ch->lock, flags);
2451 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2452 		spin_unlock_irqrestore(&ch->lock, flags);
2453 		__napi_schedule(&ch->tx_napi);
2454 	}
2455 }
2456 
2457 /**
2458  * stmmac_init_coalesce - init mitigation options.
2459  * @priv: driver private structure
2460  * Description:
2461  * This inits the coalesce parameters: i.e. timer rate,
2462  * timer handler and default threshold used for enabling the
2463  * interrupt on completion bit.
2464  */
2465 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2466 {
2467 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2468 	u32 chan;
2469 
2470 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2471 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2472 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
2473 
2474 	for (chan = 0; chan < tx_channel_count; chan++) {
2475 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2476 
2477 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2478 	}
2479 }
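
/*
 * The parameters set here feed the set_ic decision ladder used by the
 * xmit paths below (see stmmac_tso_xmit()). A condensed sketch of that
 * ladder (hypothetical helper, not driver code):
 */
static bool tx_want_irq_sketch(unsigned int tx_count_frames,
			       unsigned int tx_packets,
			       unsigned int coal_frames, bool hw_tstamp)
{
	if (hw_tstamp)
		return true;	/* timestamped frames always raise an IRQ */
	if (!coal_frames)
		return false;	/* rely purely on the mitigation timer */
	if (tx_packets > coal_frames)
		return true;	/* burst larger than the frame budget */
	/* otherwise, fire roughly once every coal_frames frames */
	return (tx_count_frames % coal_frames) < tx_packets;
}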
2480 
2481 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2482 {
2483 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2484 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2485 	u32 chan;
2486 
2487 	/* set TX ring length */
2488 	for (chan = 0; chan < tx_channels_count; chan++)
2489 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2490 				       (priv->dma_tx_size - 1), chan);
2491 
2492 	/* set RX ring length */
2493 	for (chan = 0; chan < rx_channels_count; chan++)
2494 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2495 				       (priv->dma_rx_size - 1), chan);
2496 }
2497 
2498 /**
2499  *  stmmac_set_tx_queue_weight - Set TX queue weight
2500  *  @priv: driver private structure
2501  *  Description: It is used for setting the TX queue weights
2502  */
2503 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2504 {
2505 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2506 	u32 weight;
2507 	u32 queue;
2508 
2509 	for (queue = 0; queue < tx_queues_count; queue++) {
2510 		weight = priv->plat->tx_queues_cfg[queue].weight;
2511 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2512 	}
2513 }
2514 
2515 /**
2516  *  stmmac_configure_cbs - Configure CBS in TX queue
2517  *  @priv: driver private structure
2518  *  Description: It is used for configuring CBS in AVB TX queues
2519  */
2520 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2521 {
2522 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2523 	u32 mode_to_use;
2524 	u32 queue;
2525 
2526 	/* queue 0 is reserved for legacy traffic */
2527 	for (queue = 1; queue < tx_queues_count; queue++) {
2528 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2529 		if (mode_to_use == MTL_QUEUE_DCB)
2530 			continue;
2531 
2532 		stmmac_config_cbs(priv, priv->hw,
2533 				priv->plat->tx_queues_cfg[queue].send_slope,
2534 				priv->plat->tx_queues_cfg[queue].idle_slope,
2535 				priv->plat->tx_queues_cfg[queue].high_credit,
2536 				priv->plat->tx_queues_cfg[queue].low_credit,
2537 				queue);
2538 	}
2539 }
2540 
2541 /**
2542  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2543  *  @priv: driver private structure
2544  *  Description: It is used for mapping RX queues to RX dma channels
2545  */
2546 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2547 {
2548 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2549 	u32 queue;
2550 	u32 chan;
2551 
2552 	for (queue = 0; queue < rx_queues_count; queue++) {
2553 		chan = priv->plat->rx_queues_cfg[queue].chan;
2554 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2555 	}
2556 }
2557 
2558 /**
2559  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2560  *  @priv: driver private structure
2561  *  Description: It is used for configuring the RX Queue Priority
2562  */
2563 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2564 {
2565 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2566 	u32 queue;
2567 	u32 prio;
2568 
2569 	for (queue = 0; queue < rx_queues_count; queue++) {
2570 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2571 			continue;
2572 
2573 		prio = priv->plat->rx_queues_cfg[queue].prio;
2574 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2575 	}
2576 }
2577 
2578 /**
2579  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2580  *  @priv: driver private structure
2581  *  Description: It is used for configuring the TX Queue Priority
2582  */
2583 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2584 {
2585 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2586 	u32 queue;
2587 	u32 prio;
2588 
2589 	for (queue = 0; queue < tx_queues_count; queue++) {
2590 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2591 			continue;
2592 
2593 		prio = priv->plat->tx_queues_cfg[queue].prio;
2594 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2595 	}
2596 }
2597 
2598 /**
2599  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2600  *  @priv: driver private structure
2601  *  Description: It is used for configuring the RX queue routing
2602  */
2603 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2604 {
2605 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2606 	u32 queue;
2607 	u8 packet;
2608 
2609 	for (queue = 0; queue < rx_queues_count; queue++) {
2610 		/* no specific packet type routing specified for the queue */
2611 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2612 			continue;
2613 
2614 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2615 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2616 	}
2617 }
2618 
2619 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2620 {
2621 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2622 		priv->rss.enable = false;
2623 		return;
2624 	}
2625 
2626 	if (priv->dev->features & NETIF_F_RXHASH)
2627 		priv->rss.enable = true;
2628 	else
2629 		priv->rss.enable = false;
2630 
2631 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
2632 			     priv->plat->rx_queues_to_use);
2633 }
2634 
2635 /**
2636  *  stmmac_mtl_configuration - Configure MTL
2637  *  @priv: driver private structure
2638  *  Description: It is used for configuring the MTL
2639  */
2640 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2641 {
2642 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2643 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2644 
2645 	if (tx_queues_count > 1)
2646 		stmmac_set_tx_queue_weight(priv);
2647 
2648 	/* Configure MTL RX algorithms */
2649 	if (rx_queues_count > 1)
2650 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2651 				priv->plat->rx_sched_algorithm);
2652 
2653 	/* Configure MTL TX algorithms */
2654 	if (tx_queues_count > 1)
2655 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2656 				priv->plat->tx_sched_algorithm);
2657 
2658 	/* Configure CBS in AVB TX queues */
2659 	if (tx_queues_count > 1)
2660 		stmmac_configure_cbs(priv);
2661 
2662 	/* Map RX MTL to DMA channels */
2663 	stmmac_rx_queue_dma_chan_map(priv);
2664 
2665 	/* Enable MAC RX Queues */
2666 	stmmac_mac_enable_rx_queues(priv);
2667 
2668 	/* Set RX priorities */
2669 	if (rx_queues_count > 1)
2670 		stmmac_mac_config_rx_queues_prio(priv);
2671 
2672 	/* Set TX priorities */
2673 	if (tx_queues_count > 1)
2674 		stmmac_mac_config_tx_queues_prio(priv);
2675 
2676 	/* Set RX routing */
2677 	if (rx_queues_count > 1)
2678 		stmmac_mac_config_rx_queues_routing(priv);
2679 
2680 	/* Receive Side Scaling */
2681 	if (rx_queues_count > 1)
2682 		stmmac_mac_config_rss(priv);
2683 }
2684 
2685 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2686 {
2687 	if (priv->dma_cap.asp) {
2688 		netdev_info(priv->dev, "Enabling Safety Features\n");
2689 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2690 	} else {
2691 		netdev_info(priv->dev, "No Safety Features support found\n");
2692 	}
2693 }
2694 
2695 /**
2696  * stmmac_hw_setup - setup mac in a usable state.
2697  *  @dev : pointer to the device structure.
2698  *  @ptp_register: register PTP if set
2699  *  Description:
2700  *  this is the main function to set up the HW in a usable state: the
2701  *  dma engine is reset, the core registers are configured (e.g. AXI,
2702  *  Checksum features, timers). On return the DMA is ready to start
2703  *  receiving and transmitting.
2704  *  Return value:
2705  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2706  *  file on failure.
2707  */
2708 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
2709 {
2710 	struct stmmac_priv *priv = netdev_priv(dev);
2711 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2712 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2713 	u32 chan;
2714 	int ret;
2715 
2716 	/* DMA initialization and SW reset */
2717 	ret = stmmac_init_dma_engine(priv);
2718 	if (ret < 0) {
2719 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2720 			   __func__);
2721 		return ret;
2722 	}
2723 
2724 	/* Copy the MAC addr into the HW */
2725 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2726 
2727 	/* PS and related bits will be programmed according to the speed */
2728 	if (priv->hw->pcs) {
2729 		int speed = priv->plat->mac_port_sel_speed;
2730 
2731 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2732 		    (speed == SPEED_1000)) {
2733 			priv->hw->ps = speed;
2734 		} else {
2735 			dev_warn(priv->device, "invalid port speed\n");
2736 			priv->hw->ps = 0;
2737 		}
2738 	}
2739 
2740 	/* Initialize the MAC Core */
2741 	stmmac_core_init(priv, priv->hw, dev);
2742 
2743 	/* Initialize MTL */
2744 	stmmac_mtl_configuration(priv);
2745 
2746 	/* Initialize Safety Features */
2747 	stmmac_safety_feat_configuration(priv);
2748 
2749 	ret = stmmac_rx_ipc(priv, priv->hw);
2750 	if (!ret) {
2751 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2752 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2753 		priv->hw->rx_csum = 0;
2754 	}
2755 
2756 	/* Enable the MAC Rx/Tx */
2757 	stmmac_mac_set(priv, priv->ioaddr, true);
2758 
2759 	/* Set the HW DMA mode and the COE */
2760 	stmmac_dma_operation_mode(priv);
2761 
2762 	stmmac_mmc_setup(priv);
2763 
2764 	if (ptp_register) {
2765 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2766 		if (ret < 0)
2767 			netdev_warn(priv->dev,
2768 				    "failed to enable PTP reference clock: %pe\n",
2769 				    ERR_PTR(ret));
2770 	}
2771 
2772 	ret = stmmac_init_ptp(priv);
2773 	if (ret == -EOPNOTSUPP)
2774 		netdev_warn(priv->dev, "PTP not supported by HW\n");
2775 	else if (ret)
2776 		netdev_warn(priv->dev, "PTP init failed\n");
2777 	else if (ptp_register)
2778 		stmmac_ptp_register(priv);
2779 
2780 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2781 
2782 	/* Convert the timer from msec to usec */
2783 	if (!priv->tx_lpi_timer)
2784 		priv->tx_lpi_timer = eee_timer * 1000;
2785 
2786 	if (priv->use_riwt) {
2787 		if (!priv->rx_riwt)
2788 			priv->rx_riwt = DEF_DMA_RIWT;
2789 
2790 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2791 	}
2792 
2793 	if (priv->hw->pcs)
2794 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2795 
2796 	/* set TX and RX rings length */
2797 	stmmac_set_rings_length(priv);
2798 
2799 	/* Enable TSO */
2800 	if (priv->tso) {
2801 		for (chan = 0; chan < tx_cnt; chan++) {
2802 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2803 
2804 			/* TSO and TBS cannot co-exist */
2805 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
2806 				continue;
2807 
2808 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2809 		}
2810 	}
2811 
2812 	/* Enable Split Header */
2813 	if (priv->sph && priv->hw->rx_csum) {
2814 		for (chan = 0; chan < rx_cnt; chan++)
2815 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2816 	}
2817 
2818 	/* VLAN Tag Insertion */
2819 	if (priv->dma_cap.vlins)
2820 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2821 
2822 	/* TBS */
2823 	for (chan = 0; chan < tx_cnt; chan++) {
2824 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2825 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2826 
2827 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2828 	}
2829 
2830 	/* Configure real RX and TX queues */
2831 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2832 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2833 
2834 	/* Start the ball rolling... */
2835 	stmmac_start_all_dma(priv);
2836 
2837 	return 0;
2838 }
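
/*
 * Sketch of the PCS port-speed validation performed above (hypothetical
 * helper): only 10/100/1000 are meaningful for the PS bits, anything
 * else leaves them cleared.
 */
static int pcs_port_speed_sketch(int speed)
{
	switch (speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		return speed;
	default:
		return 0;	/* invalid: leave the PS bits cleared */
	}
}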
2839 
2840 static void stmmac_hw_teardown(struct net_device *dev)
2841 {
2842 	struct stmmac_priv *priv = netdev_priv(dev);
2843 
2844 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2845 }
2846 
2847 /**
2848  *  stmmac_open - open entry point of the driver
2849  *  @dev : pointer to the device structure.
2850  *  Description:
2851  *  This function is the open entry point of the driver.
2852  *  Return value:
2853  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2854  *  file on failure.
2855  */
2856 static int stmmac_open(struct net_device *dev)
2857 {
2858 	struct stmmac_priv *priv = netdev_priv(dev);
2859 	int bfsize = 0;
2860 	u32 chan;
2861 	int ret;
2862 
2863 	ret = pm_runtime_get_sync(priv->device);
2864 	if (ret < 0) {
2865 		pm_runtime_put_noidle(priv->device);
2866 		return ret;
2867 	}
2868 
2869 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2870 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2871 	    priv->hw->xpcs == NULL) {
2872 		ret = stmmac_init_phy(dev);
2873 		if (ret) {
2874 			netdev_err(priv->dev,
2875 				   "%s: Cannot attach to PHY (error: %d)\n",
2876 				   __func__, ret);
2877 			goto init_phy_error;
2878 		}
2879 	}
2880 
2881 	/* Extra statistics */
2882 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2883 	priv->xstats.threshold = tc;
2884 
2885 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2886 	if (bfsize < 0)
2887 		bfsize = 0;
2888 
2889 	if (bfsize < BUF_SIZE_16KiB)
2890 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2891 
2892 	priv->dma_buf_sz = bfsize;
2893 	buf_sz = bfsize;
2894 
2895 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2896 
2897 	if (!priv->dma_tx_size)
2898 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2899 	if (!priv->dma_rx_size)
2900 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2901 
2902 	/* Earlier check for TBS */
2903 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2904 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2905 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2906 
2907 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
2908 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2909 	}
2910 
2911 	ret = alloc_dma_desc_resources(priv);
2912 	if (ret < 0) {
2913 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2914 			   __func__);
2915 		goto dma_desc_error;
2916 	}
2917 
2918 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2919 	if (ret < 0) {
2920 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2921 			   __func__);
2922 		goto init_error;
2923 	}
2924 
2925 	if (priv->plat->serdes_powerup) {
2926 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
2927 		if (ret < 0) {
2928 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
2929 				   __func__);
2930 			goto init_error;
2931 		}
2932 	}
2933 
2934 	ret = stmmac_hw_setup(dev, true);
2935 	if (ret < 0) {
2936 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2937 		goto init_error;
2938 	}
2939 
2940 	stmmac_init_coalesce(priv);
2941 
2942 	phylink_start(priv->phylink);
2943 	/* We may have called phylink_speed_down before */
2944 	phylink_speed_up(priv->phylink);
2945 
2946 	/* Request the IRQ lines */
2947 	ret = request_irq(dev->irq, stmmac_interrupt,
2948 			  IRQF_SHARED, dev->name, dev);
2949 	if (unlikely(ret < 0)) {
2950 		netdev_err(priv->dev,
2951 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2952 			   __func__, dev->irq, ret);
2953 		goto irq_error;
2954 	}
2955 
2956 	/* Request the Wake IRQ in case another line is used for WoL */
2957 	if (priv->wol_irq != dev->irq) {
2958 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2959 				  IRQF_SHARED, dev->name, dev);
2960 		if (unlikely(ret < 0)) {
2961 			netdev_err(priv->dev,
2962 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2963 				   __func__, priv->wol_irq, ret);
2964 			goto wolirq_error;
2965 		}
2966 	}
2967 
2968 	/* Request the LPI IRQ in case a separate line is used for it */
2969 	if (priv->lpi_irq > 0) {
2970 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2971 				  dev->name, dev);
2972 		if (unlikely(ret < 0)) {
2973 			netdev_err(priv->dev,
2974 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2975 				   __func__, priv->lpi_irq, ret);
2976 			goto lpiirq_error;
2977 		}
2978 	}
2979 
2980 	stmmac_enable_all_queues(priv);
2981 	netif_tx_start_all_queues(priv->dev);
2982 
2983 	return 0;
2984 
2985 lpiirq_error:
2986 	if (priv->wol_irq != dev->irq)
2987 		free_irq(priv->wol_irq, dev);
2988 wolirq_error:
2989 	free_irq(dev->irq, dev);
2990 irq_error:
2991 	phylink_stop(priv->phylink);
2992 
2993 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2994 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2995 
2996 	stmmac_hw_teardown(dev);
2997 init_error:
2998 	free_dma_desc_resources(priv);
2999 dma_desc_error:
3000 	phylink_disconnect_phy(priv->phylink);
3001 init_phy_error:
3002 	pm_runtime_put(priv->device);
3003 	return ret;
3004 }
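
/*
 * The error path of stmmac_open() above follows the usual unwind-in-
 * reverse idiom. A minimal sketch of the shape; get_a()/put_a() and
 * friends are hypothetical stand-ins for the real acquire/release steps
 * (runtime PM, PHY, DMA resources, IRQ lines):
 */
static int get_a(void), get_b(void), get_c(void);
static void put_a(void), put_b(void);

static int open_unwind_sketch(void)
{
	int ret;

	ret = get_a();
	if (ret)
		return ret;

	ret = get_b();
	if (ret)
		goto err_b;

	ret = get_c();
	if (ret)
		goto err_c;

	return 0;

err_c:
	put_b();	/* release in reverse order of acquisition */
err_b:
	put_a();
	return ret;
}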
3005 
3006 /**
3007  *  stmmac_release - close entry point of the driver
3008  *  @dev : device pointer.
3009  *  Description:
3010  *  This is the stop entry point of the driver.
3011  */
3012 static int stmmac_release(struct net_device *dev)
3013 {
3014 	struct stmmac_priv *priv = netdev_priv(dev);
3015 	u32 chan;
3016 
3017 	netif_tx_disable(dev);
3018 
3019 	if (device_may_wakeup(priv->device))
3020 		phylink_speed_down(priv->phylink, false);
3021 	/* Stop and disconnect the PHY */
3022 	phylink_stop(priv->phylink);
3023 	phylink_disconnect_phy(priv->phylink);
3024 
3025 	stmmac_disable_all_queues(priv);
3026 
3027 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3028 		del_timer_sync(&priv->tx_queue[chan].txtimer);
3029 
3030 	/* Free the IRQ lines */
3031 	free_irq(dev->irq, dev);
3032 	if (priv->wol_irq != dev->irq)
3033 		free_irq(priv->wol_irq, dev);
3034 	if (priv->lpi_irq > 0)
3035 		free_irq(priv->lpi_irq, dev);
3036 
3037 	if (priv->eee_enabled) {
3038 		priv->tx_path_in_lpi_mode = false;
3039 		del_timer_sync(&priv->eee_ctrl_timer);
3040 	}
3041 
3042 	/* Stop TX/RX DMA and clear the descriptors */
3043 	stmmac_stop_all_dma(priv);
3044 
3045 	/* Release and free the Rx/Tx resources */
3046 	free_dma_desc_resources(priv);
3047 
3048 	/* Disable the MAC Rx/Tx */
3049 	stmmac_mac_set(priv, priv->ioaddr, false);
3050 
3051 	/* Power down the SerDes if present */
3052 	if (priv->plat->serdes_powerdown)
3053 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3054 
3055 	netif_carrier_off(dev);
3056 
3057 	stmmac_release_ptp(priv);
3058 
3059 	pm_runtime_put(priv->device);
3060 
3061 	return 0;
3062 }
3063 
3064 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3065 			       struct stmmac_tx_queue *tx_q)
3066 {
3067 	u16 tag = 0x0, inner_tag = 0x0;
3068 	u32 inner_type = 0x0;
3069 	struct dma_desc *p;
3070 
3071 	if (!priv->dma_cap.vlins)
3072 		return false;
3073 	if (!skb_vlan_tag_present(skb))
3074 		return false;
3075 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3076 		inner_tag = skb_vlan_tag_get(skb);
3077 		inner_type = STMMAC_VLAN_INSERT;
3078 	}
3079 
3080 	tag = skb_vlan_tag_get(skb);
3081 
3082 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3083 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3084 	else
3085 		p = &tx_q->dma_tx[tx_q->cur_tx];
3086 
3087 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3088 		return false;
3089 
3090 	stmmac_set_tx_owner(priv, p);
3091 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3092 	return true;
3093 }
3094 
3095 /**
3096  *  stmmac_tso_allocator - fill TX descriptors for a TSO buffer
3097  *  @priv: driver private structure
3098  *  @des: buffer start address
3099  *  @total_len: total length to fill in descriptors
3100  *  @last_segment: condition for the last descriptor
3101  *  @queue: TX queue index
3102  *  Description:
3103  *  This function fills the descriptors, requesting a new one whenever the
3104  *  remaining length exceeds what a single descriptor can hold
3105  */
3106 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3107 				 int total_len, bool last_segment, u32 queue)
3108 {
3109 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3110 	struct dma_desc *desc;
3111 	u32 buff_size;
3112 	int tmp_len;
3113 
3114 	tmp_len = total_len;
3115 
3116 	while (tmp_len > 0) {
3117 		dma_addr_t curr_addr;
3118 
3119 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3120 						priv->dma_tx_size);
3121 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3122 
3123 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3124 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3125 		else
3126 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3127 
3128 		curr_addr = des + (total_len - tmp_len);
3129 		if (priv->dma_cap.addr64 <= 32)
3130 			desc->des0 = cpu_to_le32(curr_addr);
3131 		else
3132 			stmmac_set_desc_addr(priv, desc, curr_addr);
3133 
3134 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3135 			    TSO_MAX_BUFF_SIZE : tmp_len;
3136 
3137 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3138 				0, 1,
3139 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3140 				0, 0);
3141 
3142 		tmp_len -= TSO_MAX_BUFF_SIZE;
3143 	}
3144 }
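
/*
 * Worked example of the chunking above (illustrative helper): each
 * descriptor carries at most TSO_MAX_BUFF_SIZE (16383) bytes, so a
 * 40000-byte payload consumes DIV_ROUND_UP(40000, 16383) = 3
 * descriptors. The availability check in stmmac_tso_xmit() below
 * applies essentially the same computation.
 */
static unsigned int tso_desc_count_sketch(unsigned int total_len)
{
	return (total_len + TSO_MAX_BUFF_SIZE - 1) / TSO_MAX_BUFF_SIZE;
}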
3145 
3146 /**
3147  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3148  *  @skb : the socket buffer
3149  *  @dev : device pointer
3150  *  Description: this is the transmit function that is called on TSO frames
3151  *  (support available on GMAC4 and newer chips).
3152  *  The diagram below shows the ring programming in the case of TSO frames:
3153  *
3154  *  First Descriptor
3155  *   --------
3156  *   | DES0 |---> buffer1 = L2/L3/L4 header
3157  *   | DES1 |---> TCP Payload (can continue on next descr...)
3158  *   | DES2 |---> buffer 1 and 2 len
3159  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3160  *   --------
3161  *	|
3162  *     ...
3163  *	|
3164  *   --------
3165  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3166  *   | DES1 | --|
3167  *   | DES2 | --> buffer 1 and 2 len
3168  *   | DES3 |
3169  *   --------
3170  *
3171  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
3172  */
3173 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3174 {
3175 	struct dma_desc *desc, *first, *mss_desc = NULL;
3176 	struct stmmac_priv *priv = netdev_priv(dev);
3177 	int desc_size, tmp_pay_len = 0, first_tx;
3178 	int nfrags = skb_shinfo(skb)->nr_frags;
3179 	u32 queue = skb_get_queue_mapping(skb);
3180 	unsigned int first_entry, tx_packets;
3181 	struct stmmac_tx_queue *tx_q;
3182 	bool has_vlan, set_ic;
3183 	u8 proto_hdr_len, hdr;
3184 	u32 pay_len, mss;
3185 	dma_addr_t des;
3186 	int i;
3187 
3188 	tx_q = &priv->tx_queue[queue];
3189 	first_tx = tx_q->cur_tx;
3190 
3191 	/* Compute header lengths */
3192 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3193 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3194 		hdr = sizeof(struct udphdr);
3195 	} else {
3196 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3197 		hdr = tcp_hdrlen(skb);
3198 	}
3199 
3200 	/* Desc availability based on the threshold should be safe enough */
3201 	if (unlikely(stmmac_tx_avail(priv, queue) <
3202 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3203 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3204 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3205 								queue));
3206 			/* This is a hard error, log it. */
3207 			netdev_err(priv->dev,
3208 				   "%s: Tx Ring full when queue awake\n",
3209 				   __func__);
3210 		}
3211 		return NETDEV_TX_BUSY;
3212 	}
3213 
3214 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3215 
3216 	mss = skb_shinfo(skb)->gso_size;
3217 
3218 	/* set new MSS value if needed */
3219 	if (mss != tx_q->mss) {
3220 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3221 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3222 		else
3223 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3224 
3225 		stmmac_set_mss(priv, mss_desc, mss);
3226 		tx_q->mss = mss;
3227 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3228 						priv->dma_tx_size);
3229 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3230 	}
3231 
3232 	if (netif_msg_tx_queued(priv)) {
3233 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3234 			__func__, hdr, proto_hdr_len, pay_len, mss);
3235 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3236 			skb->data_len);
3237 	}
3238 
3239 	/* Check if VLAN can be inserted by HW */
3240 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3241 
3242 	first_entry = tx_q->cur_tx;
3243 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3244 
3245 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3246 		desc = &tx_q->dma_entx[first_entry].basic;
3247 	else
3248 		desc = &tx_q->dma_tx[first_entry];
3249 	first = desc;
3250 
3251 	if (has_vlan)
3252 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3253 
3254 	/* first descriptor: fill Headers on Buf1 */
3255 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3256 			     DMA_TO_DEVICE);
3257 	if (dma_mapping_error(priv->device, des))
3258 		goto dma_map_err;
3259 
3260 	if (priv->dma_cap.addr64 <= 32) {
3261 		first->des0 = cpu_to_le32(des);
3262 
3263 		/* Fill start of payload in buff2 of first descriptor */
3264 		if (pay_len)
3265 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3266 
3267 		/* If needed take extra descriptors to fill the remaining payload */
3268 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3269 	} else {
3270 		stmmac_set_desc_addr(priv, first, des);
3271 		tmp_pay_len = pay_len;
3272 		des += proto_hdr_len;
3273 		pay_len = 0;
3274 	}
3275 
	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* If two or more DMA transmit descriptors are allocated for this
	 * non-paged SKB data, the DMA buffer address is saved only in the
	 * tx_q->tx_skbuff_dma[].buf entry of the last descriptor, and the
	 * other tx_q->tx_skbuff_dma[].buf entries are left NULL. This
	 * guarantees that stmmac_tx_clean() does not unmap the DMA buffer
	 * too early, since the tail areas of the buffer may still be
	 * accessed by the DMA engine.
	 * With the address stored only in the entry of the last descriptor,
	 * stmmac_tx_clean() unmaps the buffer right after the DMA engine has
	 * completely finished transmitting it.
	 */
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}
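	/* Worked example of the mitigation heuristic above: with
	 * tx_coal_frames == 25 and a running tx_count_frames of 48, a
	 * 2-frame burst gives (48 + 2) % 25 == 0 < 2, so the IC bit is set
	 * and the counter resets; completion interrupts therefore fire
	 * roughly once per tx_coal_frames descriptors instead of per packet.
	 */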

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			hdr / 4, (skb->len - proto_hdr_len));

	/* If a context descriptor is used to change the MSS */
	if (mss_desc) {
		/* Make sure that the first descriptor has been completely
		 * written, including its own OWN bit. The MSS context
		 * descriptor actually precedes the first descriptor, so its
		 * OWN bit must be the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The OWN bit must be the last field set when preparing the
	 * descriptor, and a barrier is then needed to make sure that
	 * everything is coherent before granting control to the DMA engine.
	 */
	wmb();
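	/* Barrier usage above, in brief: dma_wmb() orders the CPU's writes to
	 * the coherent descriptor memory against each other (descriptor body
	 * before the context descriptor's OWN bit), while the heavier wmb()
	 * makes all descriptor writes visible before the MMIO tail-pointer
	 * write below hands the ring over to the DMA engine.
	 */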

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

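	/* The tail pointer is a byte address into the descriptor ring: with
	 * e.g. cur_tx == 5 and 16-byte basic descriptors it points 80 bytes
	 * past dma_tx_phy, telling the DMA engine how far into the ring it
	 * may fetch.
	 */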
	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	int entry, desc_size, first_tx;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	/* The OWN bit must be the last field set when preparing the
	 * descriptor, and a barrier is then needed to make sure that
	 * everything is coherent before granting control to the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
	__be16 vlan_proto = veth->h_vlan_proto;
	u16 vlanid;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
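		/* Layout before the pop: |dst(6)|src(6)|tag proto(2)|TCI(2)|.
		 * memmove() shifts the 12 MAC-address bytes forward over the
		 * 4-byte tag, then skb_pull() drops those 4 bytes, leaving a
		 * plain Ethernet header with the tag carried out-of-band in
		 * the skb.
		 */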
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}

/**
 * stmmac_rx_refill - refill the used preallocated RX buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: reallocate the RX buffers used by the zero-copy
 * reception process.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int len, dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

		buf->addr = page_pool_get_dma_addr(buf->page);

		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames;
		if (rx_q->rx_count_frames > priv->rx_coal_frames)
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames;
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;
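		/* use_rx_wd appears to select whether this descriptor relies
		 * on the RX interrupt watchdog (RIWT) rather than raising an
		 * interrupt immediately on completion; it is forced off when
		 * the core's watchdog support is not in use (priv->use_riwt
		 * == 0).
		 */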

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}

static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description: this is the function called by the NAPI poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct sk_buff *skb = NULL;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		if (count >= limit)
			break;

		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page));
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			if (buf2_len)
				buf2_len -= ETH_FCS_LEN;
			else
				buf1_len -= ETH_FCS_LEN;

			len -= ETH_FCS_LEN;
		}

		if (!skb) {
			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, page_address(buf->page),
						buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf1_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}
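	/* A frame can span multiple descriptors, and the NAPI budget may run
	 * out mid-frame: the partially assembled skb and byte count are
	 * parked in rx_q->state here and picked up again at the top of the
	 * loop on the next poll, so no data is lost across poll boundaries.
	 */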

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}

static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
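	/* Standard NAPI rearm pattern: the RX DMA interrupt is re-enabled
	 * only once napi_complete_done() confirms the poll really finished
	 * under budget; the channel lock keeps this rearm from racing with
	 * other contexts toggling the same interrupt-enable bits.
	 */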

	return work_done;
}

static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	const int mtu = new_mtu;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;
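	/* The TX FIFO is shared equally between queues, so the limit checked
	 * below is the per-queue share: e.g. a 16 KiB FIFO split across 4
	 * queues leaves 4 KiB per queue, which would reject any aligned MTU
	 * larger than 4096 bytes.
	 */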

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If this is true, the FIFO is too small or the MTU is too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	dev->mtu = mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have buggy Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and don't use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable TSO if requested via ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	bool sph_en;
	u32 chan;

	/* Keep the COE type if RX checksum offload is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issues.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer (must be valid).
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* Handle the GMAC's own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's always use Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

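/* Compute the bit-reflected CRC-32 (Ethernet polynomial, reversed form
 * 0xEDB88320) over the low 12 bits of the little-endian VID, one bit at a
 * time. The VLAN hash filter indexes its bins with the top bits of this CRC;
 * see stmmac_vlan_update() below.
 */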
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}
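	/* bitrev32(~crc) >> 28 keeps only the 4 most significant bits of the
	 * inverted, bit-reversed CRC, so every VID maps to one of 16 hash
	 * bins; the bin mask accumulated in 'hash' is what gets programmed
	 * into the hardware VLAN hash filter.
	 */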

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}

static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}

static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};

static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (GMAC cores newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}
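		/* Worked example: hash_tb_sz == 2 gives BIT(2) << 5 == 128
		 * multicast filter bins, and mcast_bits_log2 == 7 bits of the
		 * address CRC are then used to pick a bin.
		 */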
4847 
4848 		/* TXCOE doesn't work in thresh DMA mode */
4849 		if (priv->plat->force_thresh_dma_mode)
4850 			priv->plat->tx_coe = 0;
4851 		else
4852 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4853 
4854 		/* In case of GMAC4 rx_coe is from HW cap register. */
4855 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4856 
4857 		if (priv->dma_cap.rx_coe_type2)
4858 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4859 		else if (priv->dma_cap.rx_coe_type1)
4860 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4861 
4862 	} else {
4863 		dev_info(priv->device, "No HW DMA feature register supported\n");
4864 	}
4865 
4866 	if (priv->plat->rx_coe) {
4867 		priv->hw->rx_csum = priv->plat->rx_coe;
4868 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4869 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4870 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4871 	}
4872 	if (priv->plat->tx_coe)
4873 		dev_info(priv->device, "TX Checksum insertion supported\n");
4874 
4875 	if (priv->plat->pmt) {
4876 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4877 		device_set_wakeup_capable(priv->device, 1);
4878 	}
4879 
4880 	if (priv->dma_cap.tsoen)
4881 		dev_info(priv->device, "TSO supported\n");
4882 
4883 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4884 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4885 
4886 	/* Run HW quirks, if any */
4887 	if (priv->hwif_quirks) {
4888 		ret = priv->hwif_quirks(priv);
4889 		if (ret)
4890 			return ret;
4891 	}
4892 
4893 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4894 	 * In some case, for example on bugged HW this feature
4895 	 * has to be disable and this can be done by passing the
4896 	 * riwt_off field from the platform.
4897 	 */
4898 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4899 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4900 		priv->use_riwt = 1;
4901 		dev_info(priv->device,
4902 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4903 	}
4904 
4905 	return 0;
4906 }
4907 
stmmac_napi_add(struct net_device * dev)4908 static void stmmac_napi_add(struct net_device *dev)
4909 {
4910 	struct stmmac_priv *priv = netdev_priv(dev);
4911 	u32 queue, maxq;
4912 
4913 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4914 
4915 	for (queue = 0; queue < maxq; queue++) {
4916 		struct stmmac_channel *ch = &priv->channel[queue];
4917 
4918 		ch->priv_data = priv;
4919 		ch->index = queue;
4920 		spin_lock_init(&ch->lock);
4921 
4922 		if (queue < priv->plat->rx_queues_to_use) {
4923 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4924 				       NAPI_POLL_WEIGHT);
4925 		}
4926 		if (queue < priv->plat->tx_queues_to_use) {
4927 			netif_tx_napi_add(dev, &ch->tx_napi,
4928 					  stmmac_napi_poll_tx,
4929 					  NAPI_POLL_WEIGHT);
4930 		}
4931 	}
4932 }
4933 
stmmac_napi_del(struct net_device * dev)4934 static void stmmac_napi_del(struct net_device *dev)
4935 {
4936 	struct stmmac_priv *priv = netdev_priv(dev);
4937 	u32 queue, maxq;
4938 
4939 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4940 
4941 	for (queue = 0; queue < maxq; queue++) {
4942 		struct stmmac_channel *ch = &priv->channel[queue];
4943 
4944 		if (queue < priv->plat->rx_queues_to_use)
4945 			netif_napi_del(&ch->rx_napi);
4946 		if (queue < priv->plat->tx_queues_to_use)
4947 			netif_napi_del(&ch->tx_napi);
4948 	}
4949 }
4950 
stmmac_reinit_queues(struct net_device * dev,u32 rx_cnt,u32 tx_cnt)4951 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
4952 {
4953 	struct stmmac_priv *priv = netdev_priv(dev);
4954 	int ret = 0, i;
4955 
4956 	if (netif_running(dev))
4957 		stmmac_release(dev);
4958 
4959 	stmmac_napi_del(dev);
4960 
4961 	priv->plat->rx_queues_to_use = rx_cnt;
4962 	priv->plat->tx_queues_to_use = tx_cnt;
4963 	if (!netif_is_rxfh_configured(dev))
4964 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4965 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
4966 									rx_cnt);
4967 
4968 	stmmac_napi_add(dev);
4969 
4970 	if (netif_running(dev))
4971 		ret = stmmac_open(dev);
4972 
4973 	return ret;
4974 }
4975 
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it calls alloc_etherdev
 * and allocates the private structure.
 * Return: 0 on success, otherwise a negative errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers provide only a reset callback
		 * instead of an assert + deassert callback pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64-bit
	 * widths, but some SoCs support other widths: e.g. the i.MX8MP
	 * supports 34 bits, which is reported as 40 bits in
	 * MAC_HW_Feature1[ADDR64]. So override dma_cap.addr64 to match the
	 * real HW design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
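			/* Fall back to 32-bit addressing if the wider DMA
			 * mask is rejected by the platform.
			 */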
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform, the
	 * CSR Clock Range selection cannot be changed at run time and is
	 * fixed. Otherwise, the driver will try to set the MDC clock
	 * dynamically according to the actual CSR clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

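	/* Take a runtime-PM reference for the duration of probe; the
	 * matching pm_runtime_put() below releases it once registration
	 * has completed.
	 */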
	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver state.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_mac_change(priv->phylink, false);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		mutex_unlock(&priv->lock);
		rtnl_lock();
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_stop(priv->phylink);
		rtnl_unlock();
		mutex_lock(&priv->lock);

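		/* WoL is not in use: fully disable the MAC and move the
		 * pins to their sleep state.
		 */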
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}
	mutex_unlock(&priv->lock);

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 * Description: reset the RX/TX ring indices and the TX queue MSS value
 * so that the rings restart from a clean state on resume.
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: invoked on resume to bring the DMA and core back into a
 * usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power-Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a wake-up frame is received. Still,
	 * it's better to clear this bit manually, because it can cause
	 * problems while resuming from other devices (e.g. a serial
	 * console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
		rtnl_lock();
		phylink_start(priv->phylink);
		/* We may have called phylink_speed_down before */
		phylink_speed_up(priv->phylink);
		rtnl_unlock();
	}

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	phylink_mac_change(priv->phylink, true);

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
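/* Parse options from the "stmmaceth=" kernel command line when the driver
 * is built in, e.g. (values here are only illustrative):
 *	stmmaceth=debug:16,eee_timer:500
 */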
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");