1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 The full GNU General Public License is included in this distribution in
17 the file called "COPYING".
18
19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21 Documentation available at:
22 http://www.stlinux.com
23 Support available at:
24 https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
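/* Buffer/descriptor sizes are rounded up to the cache line size and then to a
 * 16-byte boundary (assumed here to be the minimum alignment the DMA engine
 * expects for its bus accesses).
 */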
57 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
58 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO 5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK 256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER 1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
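/* Convert an LPI timeout expressed in milliseconds into an absolute jiffies
 * deadline for the EEE SW timer.
 */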
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver uses ring mode to manage the TX and RX descriptors,
108 * but the user can force chain mode instead of ring mode.
109 */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
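/* Convert the TX coalesce timer, expressed in microseconds, into an absolute
 * jiffies deadline.
 */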
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124 * stmmac_verify_args - verify the driver parameters.
125 * Description: it checks the driver parameters and sets default values in
126 * case of errors.
127 */
128 static void stmmac_verify_args(void)
129 {
130 if (unlikely(watchdog < 0))
131 watchdog = TX_TIMEO;
132 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 buf_sz = DEFAULT_BUFSIZE;
134 if (unlikely(flow_ctrl > 1))
135 flow_ctrl = FLOW_AUTO;
136 else if (likely(flow_ctrl < 0))
137 flow_ctrl = FLOW_OFF;
138 if (unlikely((pause < 0) || (pause > 0xffff)))
139 pause = PAUSE_TIME;
140 if (eee_timer < 0)
141 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145 * stmmac_disable_all_queues - Disable all queues
146 * @priv: driver private structure
147 */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153 u32 queue;
154
155 for (queue = 0; queue < maxq; queue++) {
156 struct stmmac_channel *ch = &priv->channel[queue];
157
158 napi_disable(&ch->napi);
159 }
160 }
161
162 /**
163 * stmmac_enable_all_queues - Enable all queues
164 * @priv: driver private structure
165 */
166 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167 {
168 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171 u32 queue;
172
173 for (queue = 0; queue < maxq; queue++) {
174 struct stmmac_channel *ch = &priv->channel[queue];
175
176 napi_enable(&ch->napi);
177 }
178 }
179
180 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
181 {
182 if (!test_bit(STMMAC_DOWN, &priv->state) &&
183 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
184 queue_work(priv->wq, &priv->service_task);
185 }
186
187 static void stmmac_global_err(struct stmmac_priv *priv)
188 {
189 netif_carrier_off(priv->dev);
190 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
191 stmmac_service_event_schedule(priv);
192 }
193
194 /**
195 * stmmac_clk_csr_set - dynamically set the MDC clock
196 * @priv: driver private structure
197 * Description: this is to dynamically set the MDC clock according to the csr
198 * clock input.
199 * Note:
200 * If a specific clk_csr value is passed from the platform
201 * this means that the CSR Clock Range selection cannot be
202 * changed at run-time and it is fixed (as reported in the driver
203 * documentation). Otherwise the driver will try to set the MDC
204 * clock dynamically according to the actual clock input.
205 */
206 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
207 {
208 u32 clk_rate;
209
210 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
211
212 /* The platform-provided default clk_csr is assumed to be valid
213 * in all cases except the ones handled below.
214 * For values higher than the IEEE 802.3 specified frequency
215 * we cannot estimate the proper divider, since the frequency
216 * of clk_csr_i is not known. So we do not change the default
217 * divider.
218 */
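/* Illustrative example: with a 75 MHz csr clock the 60-100 MHz range below
 * is selected, i.e. clk_csr becomes STMMAC_CSR_60_100M.
 */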
219 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
220 if (clk_rate < CSR_F_35M)
221 priv->clk_csr = STMMAC_CSR_20_35M;
222 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
223 priv->clk_csr = STMMAC_CSR_35_60M;
224 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
225 priv->clk_csr = STMMAC_CSR_60_100M;
226 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
227 priv->clk_csr = STMMAC_CSR_100_150M;
228 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
229 priv->clk_csr = STMMAC_CSR_150_250M;
230 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
231 priv->clk_csr = STMMAC_CSR_250_300M;
232 }
233
234 if (priv->plat->has_sun8i) {
235 if (clk_rate > 160000000)
236 priv->clk_csr = 0x03;
237 else if (clk_rate > 80000000)
238 priv->clk_csr = 0x02;
239 else if (clk_rate > 40000000)
240 priv->clk_csr = 0x01;
241 else
242 priv->clk_csr = 0;
243 }
244
245 if (priv->plat->has_xgmac) {
246 if (clk_rate > 400000000)
247 priv->clk_csr = 0x5;
248 else if (clk_rate > 350000000)
249 priv->clk_csr = 0x4;
250 else if (clk_rate > 300000000)
251 priv->clk_csr = 0x3;
252 else if (clk_rate > 250000000)
253 priv->clk_csr = 0x2;
254 else if (clk_rate > 150000000)
255 priv->clk_csr = 0x1;
256 else
257 priv->clk_csr = 0x0;
258 }
259 }
260
261 static void print_pkt(unsigned char *buf, int len)
262 {
263 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
264 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
265 }
266
267 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
268 {
269 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
270 u32 avail;
271
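/* One descriptor is always kept unused so that cur_tx == dirty_tx
 * unambiguously means "ring empty"; hence the -1 below.
 */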
272 if (tx_q->dirty_tx > tx_q->cur_tx)
273 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
274 else
275 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
276
277 return avail;
278 }
279
280 /**
281 * stmmac_rx_dirty - Get RX queue dirty
282 * @priv: driver private structure
283 * @queue: RX queue index
284 */
285 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
286 {
287 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
288 u32 dirty;
289
290 if (rx_q->dirty_rx <= rx_q->cur_rx)
291 dirty = rx_q->cur_rx - rx_q->dirty_rx;
292 else
293 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
294
295 return dirty;
296 }
297
298 /**
299 * stmmac_hw_fix_mac_speed - callback for speed selection
300 * @priv: driver private structure
301 * Description: on some platforms (e.g. ST), some HW system configuration
302 * registers have to be set according to the link speed negotiated.
303 */
304 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
305 {
306 struct net_device *ndev = priv->dev;
307 struct phy_device *phydev = ndev->phydev;
308
309 if (likely(priv->plat->fix_mac_speed))
310 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
311 }
312
313 /**
314 * stmmac_enable_eee_mode - check and enter LPI mode
315 * @priv: driver private structure
316 * Description: this function checks that all TX queues are idle and, if so,
317 * puts the MAC transmitter into the LPI (EEE) state.
318 */
319 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
320 {
321 u32 tx_cnt = priv->plat->tx_queues_to_use;
322 u32 queue;
323
324 /* check if all TX queues have the work finished */
325 for (queue = 0; queue < tx_cnt; queue++) {
326 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
327
328 if (tx_q->dirty_tx != tx_q->cur_tx)
329 return; /* still unfinished work */
330 }
331
332 /* Check and enter in LPI mode */
333 if (!priv->tx_path_in_lpi_mode)
334 stmmac_set_eee_mode(priv, priv->hw,
335 priv->plat->en_tx_lpi_clockgating);
336 }
337
338 /**
339 * stmmac_disable_eee_mode - disable and exit from LPI mode
340 * @priv: driver private structure
341 * Description: this function exits the LPI state and disables EEE. It is
342 * called from the xmit path.
343 */
344 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
345 {
346 stmmac_reset_eee_mode(priv, priv->hw);
347 del_timer_sync(&priv->eee_ctrl_timer);
348 priv->tx_path_in_lpi_mode = false;
349 }
350
351 /**
352 * stmmac_eee_ctrl_timer - EEE TX SW timer.
353 * @t: pointer to the timer_list embedded in the driver private structure
354 * Description:
355 * if there is no data transfer and we are not already in the LPI state,
356 * then the MAC transmitter can be moved to the LPI state.
357 */
358 static void stmmac_eee_ctrl_timer(struct timer_list *t)
359 {
360 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
361
362 stmmac_enable_eee_mode(priv);
363 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
364 }
365
366 /**
367 * stmmac_eee_init - init EEE
368 * @priv: driver private structure
369 * Description:
370 * if the GMAC supports EEE (from the HW cap register) and the PHY device
371 * can also manage EEE, this function enables the LPI state and starts the
372 * related timer.
373 */
374 bool stmmac_eee_init(struct stmmac_priv *priv)
375 {
376 struct net_device *ndev = priv->dev;
377 int interface = priv->plat->interface;
378 bool ret = false;
379
380 if ((interface != PHY_INTERFACE_MODE_MII) &&
381 (interface != PHY_INTERFACE_MODE_GMII) &&
382 !phy_interface_mode_is_rgmii(interface))
383 goto out;
384
385 /* Using the PCS we cannot deal with the PHY registers at this stage,
386 * so we do not support extra features like EEE.
387 */
388 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
389 (priv->hw->pcs == STMMAC_PCS_TBI) ||
390 (priv->hw->pcs == STMMAC_PCS_RTBI))
391 goto out;
392
393 /* MAC core supports the EEE feature. */
394 if (priv->dma_cap.eee) {
395 int tx_lpi_timer = priv->tx_lpi_timer;
396
397 /* Check if the PHY supports EEE */
398 if (phy_init_eee(ndev->phydev, 1)) {
399 /* Handle the case where EEE can no longer be supported at
400 * run-time (for example because the link partner caps have
401 * changed).
402 * In that case the driver disables its own timers.
403 */
404 mutex_lock(&priv->lock);
405 if (priv->eee_active) {
406 netdev_dbg(priv->dev, "disable EEE\n");
407 del_timer_sync(&priv->eee_ctrl_timer);
408 stmmac_set_eee_timer(priv, priv->hw, 0,
409 tx_lpi_timer);
410 }
411 priv->eee_active = 0;
412 mutex_unlock(&priv->lock);
413 goto out;
414 }
415 /* Activate the EEE and start timers */
416 mutex_lock(&priv->lock);
417 if (!priv->eee_active) {
418 priv->eee_active = 1;
419 timer_setup(&priv->eee_ctrl_timer,
420 stmmac_eee_ctrl_timer, 0);
421 mod_timer(&priv->eee_ctrl_timer,
422 STMMAC_LPI_T(eee_timer));
423
424 stmmac_set_eee_timer(priv, priv->hw,
425 STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
426 }
427 /* Set HW EEE according to the speed */
428 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
429
430 ret = true;
431 mutex_unlock(&priv->lock);
432
433 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
434 }
435 out:
436 return ret;
437 }
438
439 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
440 * @priv: driver private structure
441 * @p : descriptor pointer
442 * @skb : the socket buffer
443 * Description :
444 * This function reads the timestamp from the descriptor, performs some
445 * sanity checks and passes it to the stack.
446 */
447 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
448 struct dma_desc *p, struct sk_buff *skb)
449 {
450 struct skb_shared_hwtstamps shhwtstamp;
451 u64 ns = 0;
452
453 if (!priv->hwts_tx_en)
454 return;
455
456 /* exit if skb doesn't support hw tstamp */
457 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
458 return;
459
460 /* check tx tstamp status */
461 if (stmmac_get_tx_timestamp_status(priv, p)) {
462 /* get the valid tstamp */
463 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
464
465 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
466 shhwtstamp.hwtstamp = ns_to_ktime(ns);
467
468 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
469 /* pass tstamp to stack */
470 skb_tstamp_tx(skb, &shhwtstamp);
471 }
472
473 return;
474 }
475
476 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
477 * @priv: driver private structure
478 * @p : descriptor pointer
479 * @np : next descriptor pointer
480 * @skb : the socket buffer
481 * Description :
482 * This function reads the received packet's timestamp from the descriptor
483 * and passes it to the stack. It also performs some sanity checks.
484 */
485 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
486 struct dma_desc *np, struct sk_buff *skb)
487 {
488 struct skb_shared_hwtstamps *shhwtstamp = NULL;
489 struct dma_desc *desc = p;
490 u64 ns = 0;
491
492 if (!priv->hwts_rx_en)
493 return;
494 /* For GMAC4/XGMAC, the valid timestamp is held in the context (next) desc. */
495 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
496 desc = np;
497
498 /* Check if timestamp is available */
499 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
500 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
501 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
502 shhwtstamp = skb_hwtstamps(skb);
503 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
504 shhwtstamp->hwtstamp = ns_to_ktime(ns);
505 } else {
506 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
507 }
508 }
509
510 /**
511 * stmmac_hwtstamp_ioctl - control hardware timestamping.
512 * @dev: device pointer.
513 * @ifr: An IOCTL specific structure, that can contain a pointer to
514 * a proprietary structure used to pass information to the driver.
515 * Description:
516 * This function configures the MAC to enable/disable both outgoing(TX)
517 * and incoming(RX) packets time stamping based on user input.
518 * Return Value:
519 * 0 on success and an appropriate -ve integer on failure.
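* Note (illustrative): user space typically reaches this path via
* ioctl(fd, SIOCSHWTSTAMP, &ifr) with ifr.ifr_data pointing to a
* struct hwtstamp_config.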
520 */
521 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
522 {
523 struct stmmac_priv *priv = netdev_priv(dev);
524 struct hwtstamp_config config;
525 struct timespec64 now;
526 u64 temp = 0;
527 u32 ptp_v2 = 0;
528 u32 tstamp_all = 0;
529 u32 ptp_over_ipv4_udp = 0;
530 u32 ptp_over_ipv6_udp = 0;
531 u32 ptp_over_ethernet = 0;
532 u32 snap_type_sel = 0;
533 u32 ts_master_en = 0;
534 u32 ts_event_en = 0;
535 u32 sec_inc = 0;
536 u32 value = 0;
537 bool xmac;
538
539 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
540
541 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
542 netdev_alert(priv->dev, "No support for HW time stamping\n");
543 priv->hwts_tx_en = 0;
544 priv->hwts_rx_en = 0;
545
546 return -EOPNOTSUPP;
547 }
548
549 if (copy_from_user(&config, ifr->ifr_data,
550 sizeof(struct hwtstamp_config)))
551 return -EFAULT;
552
553 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
554 __func__, config.flags, config.tx_type, config.rx_filter);
555
556 /* reserved for future extensions */
557 if (config.flags)
558 return -EINVAL;
559
560 if (config.tx_type != HWTSTAMP_TX_OFF &&
561 config.tx_type != HWTSTAMP_TX_ON)
562 return -ERANGE;
563
564 if (priv->adv_ts) {
565 switch (config.rx_filter) {
566 case HWTSTAMP_FILTER_NONE:
567 /* time stamp no incoming packet at all */
568 config.rx_filter = HWTSTAMP_FILTER_NONE;
569 break;
570
571 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
572 /* PTP v1, UDP, any kind of event packet */
573 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
574 /* take time stamp for all event messages */
575 if (xmac)
576 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
577 else
578 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
579
580 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
581 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
582 break;
583
584 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
585 /* PTP v1, UDP, Sync packet */
586 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
587 /* take time stamp for SYNC messages only */
588 ts_event_en = PTP_TCR_TSEVNTENA;
589
590 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 break;
593
594 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
595 /* PTP v1, UDP, Delay_req packet */
596 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
597 /* take time stamp for Delay_Req messages only */
598 ts_master_en = PTP_TCR_TSMSTRENA;
599 ts_event_en = PTP_TCR_TSEVNTENA;
600
601 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603 break;
604
605 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
606 /* PTP v2, UDP, any kind of event packet */
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
608 ptp_v2 = PTP_TCR_TSVER2ENA;
609 /* take time stamp for all event messages */
610 if (xmac)
611 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
612 else
613 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
614
615 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617 break;
618
619 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
620 /* PTP v2, UDP, Sync packet */
621 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
622 ptp_v2 = PTP_TCR_TSVER2ENA;
623 /* take time stamp for SYNC messages only */
624 ts_event_en = PTP_TCR_TSEVNTENA;
625
626 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
627 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
628 break;
629
630 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
631 /* PTP v2, UDP, Delay_req packet */
632 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
633 ptp_v2 = PTP_TCR_TSVER2ENA;
634 /* take time stamp for Delay_Req messages only */
635 ts_master_en = PTP_TCR_TSMSTRENA;
636 ts_event_en = PTP_TCR_TSEVNTENA;
637
638 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
639 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
640 break;
641
642 case HWTSTAMP_FILTER_PTP_V2_EVENT:
643 /* PTP v2/802.1AS, any layer, any kind of event packet */
644 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645 ptp_v2 = PTP_TCR_TSVER2ENA;
646 /* take time stamp for all event messages */
647 if (xmac)
648 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
649 else
650 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
651
652 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654 ptp_over_ethernet = PTP_TCR_TSIPENA;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V2_SYNC:
658 /* PTP v2/802.1AS, any layer, Sync packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
660 ptp_v2 = PTP_TCR_TSVER2ENA;
661 /* take time stamp for SYNC messages only */
662 ts_event_en = PTP_TCR_TSEVNTENA;
663
664 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666 ptp_over_ethernet = PTP_TCR_TSIPENA;
667 break;
668
669 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
670 /* PTP v2/802.1AS, any layer, Delay_req packet */
671 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
672 ptp_v2 = PTP_TCR_TSVER2ENA;
673 /* take time stamp for Delay_Req messages only */
674 ts_master_en = PTP_TCR_TSMSTRENA;
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 ptp_over_ethernet = PTP_TCR_TSIPENA;
680 break;
681
682 case HWTSTAMP_FILTER_NTP_ALL:
683 case HWTSTAMP_FILTER_ALL:
684 /* time stamp any incoming packet */
685 config.rx_filter = HWTSTAMP_FILTER_ALL;
686 tstamp_all = PTP_TCR_TSENALL;
687 break;
688
689 default:
690 return -ERANGE;
691 }
692 } else {
693 switch (config.rx_filter) {
694 case HWTSTAMP_FILTER_NONE:
695 config.rx_filter = HWTSTAMP_FILTER_NONE;
696 break;
697 default:
698 /* PTP v1, UDP, any kind of event packet */
699 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
700 break;
701 }
702 }
703 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
704 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
705
706 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
707 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
708 else {
709 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
710 tstamp_all | ptp_v2 | ptp_over_ethernet |
711 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
712 ts_master_en | snap_type_sel);
713 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
714
715 /* program Sub Second Increment reg */
716 stmmac_config_sub_second_increment(priv,
717 priv->ptpaddr, priv->plat->clk_ptp_rate,
718 xmac, &sec_inc);
719 temp = div_u64(1000000000ULL, sec_inc);
720
721 /* Store sub second increment and flags for later use */
722 priv->sub_second_inc = sec_inc;
723 priv->systime_flags = value;
724
725 /* Calculate the default addend so that the accumulator, clocked
726 * at clk_ptp_rate, overflows 2^32 exactly (1e9 / sec_inc) times
727 * per second:
728 * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
729 */
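/* Illustrative example (values assumed, not taken from any specific
 * platform): sec_inc = 40ns and clk_ptp_rate = 50MHz give
 * 1e9 / sec_inc = 25MHz and addend = (25e6 << 32) / 50e6 = 0x80000000,
 * i.e. the accumulator overflows once every two ptp clock cycles.
 */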
730 temp = (u64)(temp << 32);
731 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
732 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
733
734 /* initialize system time */
735 ktime_get_real_ts64(&now);
736
737 /* lower 32 bits of tv_sec are safe until y2106 */
738 stmmac_init_systime(priv, priv->ptpaddr,
739 (u32)now.tv_sec, now.tv_nsec);
740 }
741
742 return copy_to_user(ifr->ifr_data, &config,
743 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745
746 /**
747 * stmmac_init_ptp - init PTP
748 * @priv: driver private structure
749 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
750 * This is done by looking at the HW cap. register.
751 * This function also registers the ptp driver.
752 */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
756
757 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
758 return -EOPNOTSUPP;
759
760 priv->adv_ts = 0;
761 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
762 if (xmac && priv->dma_cap.atime_stamp)
763 priv->adv_ts = 1;
764 /* Dwmac 3.x core with extend_desc can support adv_ts */
765 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
766 priv->adv_ts = 1;
767
768 if (priv->dma_cap.time_stamp)
769 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
770
771 if (priv->adv_ts)
772 netdev_info(priv->dev,
773 "IEEE 1588-2008 Advanced Timestamp supported\n");
774
775 priv->hwts_tx_en = 0;
776 priv->hwts_rx_en = 0;
777
778 stmmac_ptp_register(priv);
779
780 return 0;
781 }
782
783 static void stmmac_release_ptp(struct stmmac_priv *priv)
784 {
785 if (priv->plat->clk_ptp_ref)
786 clk_disable_unprepare(priv->plat->clk_ptp_ref);
787 stmmac_ptp_unregister(priv);
788 }
789
790 /**
791 * stmmac_mac_flow_ctrl - Configure flow control in all queues
792 * @priv: driver private structure
793 * Description: It is used for configuring the flow control in all queues
794 */
795 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
796 {
797 u32 tx_cnt = priv->plat->tx_queues_to_use;
798
799 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
800 priv->pause, tx_cnt);
801 }
802
803 /**
804 * stmmac_adjust_link - adjusts the link parameters
805 * @dev: net device structure
806 * Description: this is the helper called by the physical abstraction layer
807 * drivers to communicate the PHY link status. According to the speed and
808 * duplex this driver can also invoke registered glue-logic.
809 * It also invokes the EEE initialization, because the interface may be
810 * switched between different (EEE capable) networks.
811 */
812 static void stmmac_adjust_link(struct net_device *dev)
813 {
814 struct stmmac_priv *priv = netdev_priv(dev);
815 struct phy_device *phydev = dev->phydev;
816 bool new_state = false;
817
818 if (!phydev)
819 return;
820
821 mutex_lock(&priv->lock);
822
823 if (phydev->link) {
824 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
825
826 /* Now we make sure that we can be in full duplex mode.
827 * If not, we operate in half-duplex mode. */
828 if (phydev->duplex != priv->oldduplex) {
829 new_state = true;
830 if (!phydev->duplex)
831 ctrl &= ~priv->hw->link.duplex;
832 else
833 ctrl |= priv->hw->link.duplex;
834 priv->oldduplex = phydev->duplex;
835 }
836 /* Flow Control operation */
837 if (phydev->pause)
838 stmmac_mac_flow_ctrl(priv, phydev->duplex);
839
840 if (phydev->speed != priv->speed) {
841 new_state = true;
842 ctrl &= ~priv->hw->link.speed_mask;
843 switch (phydev->speed) {
844 case SPEED_1000:
845 ctrl |= priv->hw->link.speed1000;
846 break;
847 case SPEED_100:
848 ctrl |= priv->hw->link.speed100;
849 break;
850 case SPEED_10:
851 ctrl |= priv->hw->link.speed10;
852 break;
853 default:
854 netif_warn(priv, link, priv->dev,
855 "broken speed: %d\n", phydev->speed);
856 phydev->speed = SPEED_UNKNOWN;
857 break;
858 }
859 if (phydev->speed != SPEED_UNKNOWN)
860 stmmac_hw_fix_mac_speed(priv);
861 priv->speed = phydev->speed;
862 }
863
864 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
865
866 if (!priv->oldlink) {
867 new_state = true;
868 priv->oldlink = true;
869 }
870 } else if (priv->oldlink) {
871 new_state = true;
872 priv->oldlink = false;
873 priv->speed = SPEED_UNKNOWN;
874 priv->oldduplex = DUPLEX_UNKNOWN;
875 }
876
877 if (new_state && netif_msg_link(priv))
878 phy_print_status(phydev);
879
880 mutex_unlock(&priv->lock);
881
882 if (phydev->is_pseudo_fixed_link)
883 /* Stop the PHY layer from calling the adjust_link hook when a
884 * switch is attached to the stmmac driver.
885 */
886 phydev->irq = PHY_IGNORE_INTERRUPT;
887 else
888 /* At this stage, init the EEE if supported.
889 * Never called in case of fixed_link.
890 */
891 priv->eee_enabled = stmmac_eee_init(priv);
892 }
893
894 /**
895 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
896 * @priv: driver private structure
897 * Description: this is to verify if the HW supports the PCS.
898 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
899 * configured for the TBI, RTBI, or SGMII PHY interface.
900 */
901 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
902 {
903 int interface = priv->plat->interface;
904
905 if (priv->dma_cap.pcs) {
906 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
907 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
908 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
909 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
910 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
911 priv->hw->pcs = STMMAC_PCS_RGMII;
912 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
913 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
914 priv->hw->pcs = STMMAC_PCS_SGMII;
915 }
916 }
917 }
918
919 /**
920 * stmmac_init_phy - PHY initialization
921 * @dev: net device structure
922 * Description: it initializes the driver's PHY state, and attaches the PHY
923 * to the mac driver.
924 * Return value:
925 * 0 on success
926 */
927 static int stmmac_init_phy(struct net_device *dev)
928 {
929 struct stmmac_priv *priv = netdev_priv(dev);
930 u32 tx_cnt = priv->plat->tx_queues_to_use;
931 struct phy_device *phydev;
932 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
933 char bus_id[MII_BUS_ID_SIZE];
934 int interface = priv->plat->interface;
935 int max_speed = priv->plat->max_speed;
936 priv->oldlink = false;
937 priv->speed = SPEED_UNKNOWN;
938 priv->oldduplex = DUPLEX_UNKNOWN;
939
940 if (priv->plat->phy_node) {
941 phydev = of_phy_connect(dev, priv->plat->phy_node,
942 &stmmac_adjust_link, 0, interface);
943 } else {
944 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
945 priv->plat->bus_id);
946
947 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
948 priv->plat->phy_addr);
949 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
950 phy_id_fmt);
951
952 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
953 interface);
954 }
955
956 if (IS_ERR_OR_NULL(phydev)) {
957 netdev_err(priv->dev, "Could not attach to PHY\n");
958 if (!phydev)
959 return -ENODEV;
960
961 return PTR_ERR(phydev);
962 }
963
964 /* Stop Advertising 1000BASE Capability if interface is not GMII */
965 if ((interface == PHY_INTERFACE_MODE_MII) ||
966 (interface == PHY_INTERFACE_MODE_RMII) ||
967 (max_speed < 1000 && max_speed > 0))
968 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
969 SUPPORTED_1000baseT_Full);
970
971 /*
972 * Half-duplex mode is not supported with multiple queues:
973 * half-duplex only works with a single queue.
974 */
975 if (tx_cnt > 1)
976 phydev->supported &= ~(SUPPORTED_1000baseT_Half |
977 SUPPORTED_100baseT_Half |
978 SUPPORTED_10baseT_Half);
979
980 /*
981 * Broken HW is sometimes missing the pull-up resistor on the
982 * MDIO line, which results in reads to non-existent devices returning
983 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
984 * device as well.
985 * Note: phydev->phy_id is the result of reading the UID PHY registers.
986 */
987 if (!priv->plat->phy_node && phydev->phy_id == 0) {
988 phy_disconnect(phydev);
989 return -ENODEV;
990 }
991
992 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
993 * subsequent PHY polling, so make sure we force a link transition if
994 * we have an UP/DOWN/UP transition
995 */
996 if (phydev->is_pseudo_fixed_link)
997 phydev->irq = PHY_POLL;
998
999 phy_attached_info(phydev);
1000 return 0;
1001 }
1002
1003 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1004 {
1005 u32 rx_cnt = priv->plat->rx_queues_to_use;
1006 void *head_rx;
1007 u32 queue;
1008
1009 /* Display RX rings */
1010 for (queue = 0; queue < rx_cnt; queue++) {
1011 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1012
1013 pr_info("\tRX Queue %u rings\n", queue);
1014
1015 if (priv->extend_desc)
1016 head_rx = (void *)rx_q->dma_erx;
1017 else
1018 head_rx = (void *)rx_q->dma_rx;
1019
1020 /* Display RX ring */
1021 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1022 }
1023 }
1024
1025 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1026 {
1027 u32 tx_cnt = priv->plat->tx_queues_to_use;
1028 void *head_tx;
1029 u32 queue;
1030
1031 /* Display TX rings */
1032 for (queue = 0; queue < tx_cnt; queue++) {
1033 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1034
1035 pr_info("\tTX Queue %d rings\n", queue);
1036
1037 if (priv->extend_desc)
1038 head_tx = (void *)tx_q->dma_etx;
1039 else
1040 head_tx = (void *)tx_q->dma_tx;
1041
1042 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1043 }
1044 }
1045
1046 static void stmmac_display_rings(struct stmmac_priv *priv)
1047 {
1048 /* Display RX ring */
1049 stmmac_display_rx_rings(priv);
1050
1051 /* Display TX ring */
1052 stmmac_display_tx_rings(priv);
1053 }
1054
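/* stmmac_set_bfsize - map the MTU onto one of the fixed DMA buffer size
 * classes (DEFAULT_BUFSIZE up to 16KiB). @bufsize is only the starting
 * value and is overridden by every branch below.
 */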
1055 static int stmmac_set_bfsize(int mtu, int bufsize)
1056 {
1057 int ret = bufsize;
1058
1059 if (mtu >= BUF_SIZE_8KiB)
1060 ret = BUF_SIZE_16KiB;
1061 else if (mtu >= BUF_SIZE_4KiB)
1062 ret = BUF_SIZE_8KiB;
1063 else if (mtu >= BUF_SIZE_2KiB)
1064 ret = BUF_SIZE_4KiB;
1065 else if (mtu > DEFAULT_BUFSIZE)
1066 ret = BUF_SIZE_2KiB;
1067 else
1068 ret = DEFAULT_BUFSIZE;
1069
1070 return ret;
1071 }
1072
1073 /**
1074 * stmmac_clear_rx_descriptors - clear RX descriptors
1075 * @priv: driver private structure
1076 * @queue: RX queue index
1077 * Description: this function is called to clear the RX descriptors
1078 * whether basic or extended descriptors are used.
1079 */
1080 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1081 {
1082 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1083 int i;
1084
1085 /* Clear the RX descriptors */
1086 for (i = 0; i < DMA_RX_SIZE; i++)
1087 if (priv->extend_desc)
1088 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1089 priv->use_riwt, priv->mode,
1090 (i == DMA_RX_SIZE - 1),
1091 priv->dma_buf_sz);
1092 else
1093 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1094 priv->use_riwt, priv->mode,
1095 (i == DMA_RX_SIZE - 1),
1096 priv->dma_buf_sz);
1097 }
1098
1099 /**
1100 * stmmac_clear_tx_descriptors - clear tx descriptors
1101 * @priv: driver private structure
1102 * @queue: TX queue index.
1103 * Description: this function is called to clear the TX descriptors
1104 * whether basic or extended descriptors are used.
1105 */
1106 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1107 {
1108 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1109 int i;
1110
1111 /* Clear the TX descriptors */
1112 for (i = 0; i < DMA_TX_SIZE; i++)
1113 if (priv->extend_desc)
1114 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1115 priv->mode, (i == DMA_TX_SIZE - 1));
1116 else
1117 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1118 priv->mode, (i == DMA_TX_SIZE - 1));
1119 }
1120
1121 /**
1122 * stmmac_clear_descriptors - clear descriptors
1123 * @priv: driver private structure
1124 * Description: this function is called to clear the TX and RX descriptors
1125 * whether basic or extended descriptors are used.
1126 */
1127 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1128 {
1129 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1130 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1131 u32 queue;
1132
1133 /* Clear the RX descriptors */
1134 for (queue = 0; queue < rx_queue_cnt; queue++)
1135 stmmac_clear_rx_descriptors(priv, queue);
1136
1137 /* Clear the TX descriptors */
1138 for (queue = 0; queue < tx_queue_cnt; queue++)
1139 stmmac_clear_tx_descriptors(priv, queue);
1140 }
1141
1142 /**
1143 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1144 * @priv: driver private structure
1145 * @p: descriptor pointer
1146 * @i: descriptor index
1147 * @flags: gfp flag
1148 * @queue: RX queue index
1149 * Description: this function is called to allocate a receive buffer, perform
1150 * the DMA mapping and init the descriptor.
1151 */
1152 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1153 int i, gfp_t flags, u32 queue)
1154 {
1155 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1156 struct sk_buff *skb;
1157
1158 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1159 if (!skb) {
1160 netdev_err(priv->dev,
1161 "%s: Rx init fails; skb is NULL\n", __func__);
1162 return -ENOMEM;
1163 }
1164 rx_q->rx_skbuff[i] = skb;
1165 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1166 priv->dma_buf_sz,
1167 DMA_FROM_DEVICE);
1168 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1169 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1170 dev_kfree_skb_any(skb);
1171 return -EINVAL;
1172 }
1173
1174 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1175
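/* For 16KiB (jumbo) buffers, ring mode splits the frame across the two
 * buffer pointers; the helper below programs the second one (des3).
 */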
1176 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1177 stmmac_init_desc3(priv, p);
1178
1179 return 0;
1180 }
1181
1182 /**
1183 * stmmac_free_rx_buffer - free RX dma buffers
1184 * @priv: private structure
1185 * @queue: RX queue index
1186 * @i: buffer index.
1187 */
1188 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1189 {
1190 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191
1192 if (rx_q->rx_skbuff[i]) {
1193 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1194 priv->dma_buf_sz, DMA_FROM_DEVICE);
1195 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1196 }
1197 rx_q->rx_skbuff[i] = NULL;
1198 }
1199
1200 /**
1201 * stmmac_free_tx_buffer - free a TX dma buffer
1202 * @priv: private structure
1203 * @queue: TX queue index
1204 * @i: buffer index.
1205 */
1206 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1207 {
1208 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1209
1210 if (tx_q->tx_skbuff_dma[i].buf) {
1211 if (tx_q->tx_skbuff_dma[i].map_as_page)
1212 dma_unmap_page(priv->device,
1213 tx_q->tx_skbuff_dma[i].buf,
1214 tx_q->tx_skbuff_dma[i].len,
1215 DMA_TO_DEVICE);
1216 else
1217 dma_unmap_single(priv->device,
1218 tx_q->tx_skbuff_dma[i].buf,
1219 tx_q->tx_skbuff_dma[i].len,
1220 DMA_TO_DEVICE);
1221 }
1222
1223 if (tx_q->tx_skbuff[i]) {
1224 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1225 tx_q->tx_skbuff[i] = NULL;
1226 tx_q->tx_skbuff_dma[i].buf = 0;
1227 tx_q->tx_skbuff_dma[i].map_as_page = false;
1228 }
1229 }
1230
1231 /**
1232 * init_dma_rx_desc_rings - init the RX descriptor rings
1233 * @dev: net device structure
1234 * @flags: gfp flag.
1235 * Description: this function initializes the DMA RX descriptors
1236 * and allocates the socket buffers. It supports the chained and ring
1237 * modes.
1238 */
1239 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1240 {
1241 struct stmmac_priv *priv = netdev_priv(dev);
1242 u32 rx_count = priv->plat->rx_queues_to_use;
1243 int ret = -ENOMEM;
1244 int bfsize = 0;
1245 int queue;
1246 int i;
1247
1248 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1249 if (bfsize < 0)
1250 bfsize = 0;
1251
1252 if (bfsize < BUF_SIZE_16KiB)
1253 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1254
1255 priv->dma_buf_sz = bfsize;
1256
1257 /* RX INITIALIZATION */
1258 netif_dbg(priv, probe, priv->dev,
1259 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1260
1261 for (queue = 0; queue < rx_count; queue++) {
1262 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1263
1264 netif_dbg(priv, probe, priv->dev,
1265 "(%s) dma_rx_phy=0x%08x\n", __func__,
1266 (u32)rx_q->dma_rx_phy);
1267
1268 for (i = 0; i < DMA_RX_SIZE; i++) {
1269 struct dma_desc *p;
1270
1271 if (priv->extend_desc)
1272 p = &((rx_q->dma_erx + i)->basic);
1273 else
1274 p = rx_q->dma_rx + i;
1275
1276 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1277 queue);
1278 if (ret)
1279 goto err_init_rx_buffers;
1280
1281 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1282 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1283 (unsigned int)rx_q->rx_skbuff_dma[i]);
1284 }
1285
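/* The loop above filled all DMA_RX_SIZE entries, so i == DMA_RX_SIZE here and
 * dirty_rx below starts at 0.
 */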
1286 rx_q->cur_rx = 0;
1287 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1288
1289 stmmac_clear_rx_descriptors(priv, queue);
1290
1291 /* Setup the chained descriptor addresses */
1292 if (priv->mode == STMMAC_CHAIN_MODE) {
1293 if (priv->extend_desc)
1294 stmmac_mode_init(priv, rx_q->dma_erx,
1295 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1296 else
1297 stmmac_mode_init(priv, rx_q->dma_rx,
1298 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1299 }
1300 }
1301
1302 buf_sz = bfsize;
1303
1304 return 0;
1305
1306 err_init_rx_buffers:
1307 while (queue >= 0) {
1308 while (--i >= 0)
1309 stmmac_free_rx_buffer(priv, queue, i);
1310
1311 if (queue == 0)
1312 break;
1313
1314 i = DMA_RX_SIZE;
1315 queue--;
1316 }
1317
1318 return ret;
1319 }
1320
1321 /**
1322 * init_dma_tx_desc_rings - init the TX descriptor rings
1323 * @dev: net device structure.
1324 * Description: this function initializes the DMA TX descriptors
1325 * and allocates the socket buffers. It supports the chained and ring
1326 * modes.
1327 */
1328 static int init_dma_tx_desc_rings(struct net_device *dev)
1329 {
1330 struct stmmac_priv *priv = netdev_priv(dev);
1331 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1332 u32 queue;
1333 int i;
1334
1335 for (queue = 0; queue < tx_queue_cnt; queue++) {
1336 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1337
1338 netif_dbg(priv, probe, priv->dev,
1339 "(%s) dma_tx_phy=0x%08x\n", __func__,
1340 (u32)tx_q->dma_tx_phy);
1341
1342 /* Setup the chained descriptor addresses */
1343 if (priv->mode == STMMAC_CHAIN_MODE) {
1344 if (priv->extend_desc)
1345 stmmac_mode_init(priv, tx_q->dma_etx,
1346 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1347 else
1348 stmmac_mode_init(priv, tx_q->dma_tx,
1349 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1350 }
1351
1352 for (i = 0; i < DMA_TX_SIZE; i++) {
1353 struct dma_desc *p;
1354 if (priv->extend_desc)
1355 p = &((tx_q->dma_etx + i)->basic);
1356 else
1357 p = tx_q->dma_tx + i;
1358
1359 stmmac_clear_desc(priv, p);
1360
1361 tx_q->tx_skbuff_dma[i].buf = 0;
1362 tx_q->tx_skbuff_dma[i].map_as_page = false;
1363 tx_q->tx_skbuff_dma[i].len = 0;
1364 tx_q->tx_skbuff_dma[i].last_segment = false;
1365 tx_q->tx_skbuff[i] = NULL;
1366 }
1367
1368 tx_q->dirty_tx = 0;
1369 tx_q->cur_tx = 0;
1370 tx_q->mss = 0;
1371
1372 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1373 }
1374
1375 return 0;
1376 }
1377
1378 /**
1379 * init_dma_desc_rings - init the RX/TX descriptor rings
1380 * @dev: net device structure
1381 * @flags: gfp flag.
1382 * Description: this function initializes the DMA RX/TX descriptors
1383 * and allocates the socket buffers. It supports the chained and ring
1384 * modes.
1385 */
1386 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1387 {
1388 struct stmmac_priv *priv = netdev_priv(dev);
1389 int ret;
1390
1391 ret = init_dma_rx_desc_rings(dev, flags);
1392 if (ret)
1393 return ret;
1394
1395 ret = init_dma_tx_desc_rings(dev);
1396
1397 stmmac_clear_descriptors(priv);
1398
1399 if (netif_msg_hw(priv))
1400 stmmac_display_rings(priv);
1401
1402 return ret;
1403 }
1404
1405 /**
1406 * dma_free_rx_skbufs - free RX dma buffers
1407 * @priv: private structure
1408 * @queue: RX queue index
1409 */
1410 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412 int i;
1413
1414 for (i = 0; i < DMA_RX_SIZE; i++)
1415 stmmac_free_rx_buffer(priv, queue, i);
1416 }
1417
1418 /**
1419 * dma_free_tx_skbufs - free TX dma buffers
1420 * @priv: private structure
1421 * @queue: TX queue index
1422 */
1423 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1424 {
1425 int i;
1426
1427 for (i = 0; i < DMA_TX_SIZE; i++)
1428 stmmac_free_tx_buffer(priv, queue, i);
1429 }
1430
1431 /**
1432 * free_dma_rx_desc_resources - free RX dma desc resources
1433 * @priv: private structure
1434 */
1435 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1436 {
1437 u32 rx_count = priv->plat->rx_queues_to_use;
1438 u32 queue;
1439
1440 /* Free RX queue resources */
1441 for (queue = 0; queue < rx_count; queue++) {
1442 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1443
1444 /* Release the DMA RX socket buffers */
1445 dma_free_rx_skbufs(priv, queue);
1446
1447 /* Free DMA regions of consistent memory previously allocated */
1448 if (!priv->extend_desc)
1449 dma_free_coherent(priv->device,
1450 DMA_RX_SIZE * sizeof(struct dma_desc),
1451 rx_q->dma_rx, rx_q->dma_rx_phy);
1452 else
1453 dma_free_coherent(priv->device, DMA_RX_SIZE *
1454 sizeof(struct dma_extended_desc),
1455 rx_q->dma_erx, rx_q->dma_rx_phy);
1456
1457 kfree(rx_q->rx_skbuff_dma);
1458 kfree(rx_q->rx_skbuff);
1459 }
1460 }
1461
1462 /**
1463 * free_dma_tx_desc_resources - free TX dma desc resources
1464 * @priv: private structure
1465 */
1466 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1467 {
1468 u32 tx_count = priv->plat->tx_queues_to_use;
1469 u32 queue;
1470
1471 /* Free TX queue resources */
1472 for (queue = 0; queue < tx_count; queue++) {
1473 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1474
1475 /* Release the DMA TX socket buffers */
1476 dma_free_tx_skbufs(priv, queue);
1477
1478 /* Free DMA regions of consistent memory previously allocated */
1479 if (!priv->extend_desc)
1480 dma_free_coherent(priv->device,
1481 DMA_TX_SIZE * sizeof(struct dma_desc),
1482 tx_q->dma_tx, tx_q->dma_tx_phy);
1483 else
1484 dma_free_coherent(priv->device, DMA_TX_SIZE *
1485 sizeof(struct dma_extended_desc),
1486 tx_q->dma_etx, tx_q->dma_tx_phy);
1487
1488 kfree(tx_q->tx_skbuff_dma);
1489 kfree(tx_q->tx_skbuff);
1490 }
1491 }
1492
1493 /**
1494 * alloc_dma_rx_desc_resources - alloc RX resources.
1495 * @priv: private structure
1496 * Description: according to which descriptor type is used (extended or basic)
1497 * this function allocates the resources for the RX path. For example, it
1498 * pre-allocates the RX socket buffer arrays in order to allow the zero-copy
1499 * mechanism.
1500 */
1501 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1502 {
1503 u32 rx_count = priv->plat->rx_queues_to_use;
1504 int ret = -ENOMEM;
1505 u32 queue;
1506
1507 /* RX queues buffers and DMA */
1508 for (queue = 0; queue < rx_count; queue++) {
1509 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1510
1511 rx_q->queue_index = queue;
1512 rx_q->priv_data = priv;
1513
1514 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1515 sizeof(dma_addr_t),
1516 GFP_KERNEL);
1517 if (!rx_q->rx_skbuff_dma)
1518 goto err_dma;
1519
1520 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1521 sizeof(struct sk_buff *),
1522 GFP_KERNEL);
1523 if (!rx_q->rx_skbuff)
1524 goto err_dma;
1525
1526 if (priv->extend_desc) {
1527 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1528 DMA_RX_SIZE *
1529 sizeof(struct
1530 dma_extended_desc),
1531 &rx_q->dma_rx_phy,
1532 GFP_KERNEL);
1533 if (!rx_q->dma_erx)
1534 goto err_dma;
1535
1536 } else {
1537 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1538 DMA_RX_SIZE *
1539 sizeof(struct
1540 dma_desc),
1541 &rx_q->dma_rx_phy,
1542 GFP_KERNEL);
1543 if (!rx_q->dma_rx)
1544 goto err_dma;
1545 }
1546 }
1547
1548 return 0;
1549
1550 err_dma:
1551 free_dma_rx_desc_resources(priv);
1552
1553 return ret;
1554 }
1555
1556 /**
1557 * alloc_dma_tx_desc_resources - alloc TX resources.
1558 * @priv: private structure
1559 * Description: according to which descriptor type is used (extended or basic)
1560 * this function allocates the resources for the TX path: the descriptor
1561 * ring plus the per-descriptor skb pointer and DMA bookkeeping arrays
1562 * used by the xmit and cleanup paths.
1563 */
1564 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1565 {
1566 u32 tx_count = priv->plat->tx_queues_to_use;
1567 int ret = -ENOMEM;
1568 u32 queue;
1569
1570 /* TX queues buffers and DMA */
1571 for (queue = 0; queue < tx_count; queue++) {
1572 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1573
1574 tx_q->queue_index = queue;
1575 tx_q->priv_data = priv;
1576
1577 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1578 sizeof(*tx_q->tx_skbuff_dma),
1579 GFP_KERNEL);
1580 if (!tx_q->tx_skbuff_dma)
1581 goto err_dma;
1582
1583 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1584 sizeof(struct sk_buff *),
1585 GFP_KERNEL);
1586 if (!tx_q->tx_skbuff)
1587 goto err_dma;
1588
1589 if (priv->extend_desc) {
1590 tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1591 DMA_TX_SIZE *
1592 sizeof(struct
1593 dma_extended_desc),
1594 &tx_q->dma_tx_phy,
1595 GFP_KERNEL);
1596 if (!tx_q->dma_etx)
1597 goto err_dma;
1598 } else {
1599 tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1600 DMA_TX_SIZE *
1601 sizeof(struct
1602 dma_desc),
1603 &tx_q->dma_tx_phy,
1604 GFP_KERNEL);
1605 if (!tx_q->dma_tx)
1606 goto err_dma;
1607 }
1608 }
1609
1610 return 0;
1611
1612 err_dma:
1613 free_dma_tx_desc_resources(priv);
1614
1615 return ret;
1616 }
1617
1618 /**
1619 * alloc_dma_desc_resources - alloc TX/RX resources.
1620 * @priv: private structure
1621 * Description: according to which descriptor type is used (extended or basic)
1622 * this function allocates the resources for the TX and RX paths. In case of
1623 * reception, for example, it pre-allocates the RX socket buffer arrays in
1624 * order to allow the zero-copy mechanism.
1625 */
1626 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628 /* RX Allocation */
1629 int ret = alloc_dma_rx_desc_resources(priv);
1630
1631 if (ret)
1632 return ret;
1633
1634 ret = alloc_dma_tx_desc_resources(priv);
1635
1636 return ret;
1637 }
1638
1639 /**
1640 * free_dma_desc_resources - free dma desc resources
1641 * @priv: private structure
1642 */
1643 static void free_dma_desc_resources(struct stmmac_priv *priv)
1644 {
1645 /* Release the DMA RX socket buffers */
1646 free_dma_rx_desc_resources(priv);
1647
1648 /* Release the DMA TX socket buffers */
1649 free_dma_tx_desc_resources(priv);
1650 }
1651
1652 /**
1653 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1654 * @priv: driver private structure
1655 * Description: It is used for enabling the rx queues in the MAC
1656 */
1657 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1658 {
1659 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1660 int queue;
1661 u8 mode;
1662
1663 for (queue = 0; queue < rx_queues_count; queue++) {
1664 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1665 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1666 }
1667 }
1668
1669 /**
1670 * stmmac_start_rx_dma - start RX DMA channel
1671 * @priv: driver private structure
1672 * @chan: RX channel index
1673 * Description:
1674 * This starts a RX DMA channel
1675 */
1676 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1677 {
1678 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1679 stmmac_start_rx(priv, priv->ioaddr, chan);
1680 }
1681
1682 /**
1683 * stmmac_start_tx_dma - start TX DMA channel
1684 * @priv: driver private structure
1685 * @chan: TX channel index
1686 * Description:
1687 * This starts a TX DMA channel
1688 */
1689 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1692 stmmac_start_tx(priv, priv->ioaddr, chan);
1693 }
1694
1695 /**
1696 * stmmac_stop_rx_dma - stop RX DMA channel
1697 * @priv: driver private structure
1698 * @chan: RX channel index
1699 * Description:
1700 * This stops a RX DMA channel
1701 */
1702 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1705 stmmac_stop_rx(priv, priv->ioaddr, chan);
1706 }
1707
1708 /**
1709 * stmmac_stop_tx_dma - stop TX DMA channel
1710 * @priv: driver private structure
1711 * @chan: TX channel index
1712 * Description:
1713 * This stops a TX DMA channel
1714 */
1715 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1716 {
1717 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1718 stmmac_stop_tx(priv, priv->ioaddr, chan);
1719 }
1720
1721 /**
1722 * stmmac_start_all_dma - start all RX and TX DMA channels
1723 * @priv: driver private structure
1724 * Description:
1725 * This starts all the RX and TX DMA channels
1726 */
1727 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1728 {
1729 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731 u32 chan = 0;
1732
1733 for (chan = 0; chan < rx_channels_count; chan++)
1734 stmmac_start_rx_dma(priv, chan);
1735
1736 for (chan = 0; chan < tx_channels_count; chan++)
1737 stmmac_start_tx_dma(priv, chan);
1738 }
1739
1740 /**
1741 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1742 * @priv: driver private structure
1743 * Description:
1744 * This stops the RX and TX DMA channels
1745 */
1746 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1747 {
1748 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750 u32 chan = 0;
1751
1752 for (chan = 0; chan < rx_channels_count; chan++)
1753 stmmac_stop_rx_dma(priv, chan);
1754
1755 for (chan = 0; chan < tx_channels_count; chan++)
1756 stmmac_stop_tx_dma(priv, chan);
1757 }
1758
1759 /**
1760 * stmmac_dma_operation_mode - HW DMA operation mode
1761 * @priv: driver private structure
1762 * Description: it is used for configuring the DMA operation mode register in
1763 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1764 */
1765 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1766 {
1767 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1768 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1769 int rxfifosz = priv->plat->rx_fifo_size;
1770 int txfifosz = priv->plat->tx_fifo_size;
1771 u32 txmode = 0;
1772 u32 rxmode = 0;
1773 u32 chan = 0;
1774 u8 qmode = 0;
1775
1776 if (rxfifosz == 0)
1777 rxfifosz = priv->dma_cap.rx_fifo_size;
1778 if (txfifosz == 0)
1779 txfifosz = priv->dma_cap.tx_fifo_size;
1780
1781 /* Adjust for real per queue fifo size */
1782 rxfifosz /= rx_channels_count;
1783 txfifosz /= tx_channels_count;
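/* For illustration (values are an example, not from the databook): a
 * platform-provided 8192 byte TX FIFO shared by 4 TX channels leaves
 * 2048 bytes per channel for the operation mode programming below.
 */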
1784
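/* Pick the operation mode: forced threshold mode uses the "tc" module
 * parameter in both directions; forced SF mode or TX checksum offload
 * selects Store-And-Forward; otherwise TX is threshold driven and RX
 * stays in SF mode.
 */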
1785 if (priv->plat->force_thresh_dma_mode) {
1786 txmode = tc;
1787 rxmode = tc;
1788 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1789 /*
1790 * In case of GMAC, SF mode can be enabled
1791 * to perform the TX COE in HW. This depends on:
1792 * 1) TX COE being actually supported
1793 * 2) There being no bugged Jumbo frame support
1794 * that requires not inserting the csum in the TDES.
1795 */
1796 txmode = SF_DMA_MODE;
1797 rxmode = SF_DMA_MODE;
1798 priv->xstats.threshold = SF_DMA_MODE;
1799 } else {
1800 txmode = tc;
1801 rxmode = SF_DMA_MODE;
1802 }
1803
1804 /* configure all channels */
1805 for (chan = 0; chan < rx_channels_count; chan++) {
1806 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1807
1808 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1809 rxfifosz, qmode);
1810 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1811 chan);
1812 }
1813
1814 for (chan = 0; chan < tx_channels_count; chan++) {
1815 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1816
1817 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1818 txfifosz, qmode);
1819 }
1820 }
1821
1822 /**
1823 * stmmac_tx_clean - to manage the transmission completion
1824 * @priv: driver private structure
1825 * @queue: TX queue index
1826 * Description: it reclaims the transmit resources after transmission completes.
1827 */
1828 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1829 {
1830 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1831 unsigned int bytes_compl = 0, pkts_compl = 0;
1832 unsigned int entry, count = 0;
1833
1834 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1835
1836 priv->xstats.tx_clean++;
1837
1838 entry = tx_q->dirty_tx;
1839 while ((entry != tx_q->cur_tx) && (count < budget)) {
1840 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1841 struct dma_desc *p;
1842 int status;
1843
1844 if (priv->extend_desc)
1845 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1846 else
1847 p = tx_q->dma_tx + entry;
1848
1849 status = stmmac_tx_status(priv, &priv->dev->stats,
1850 &priv->xstats, p, priv->ioaddr);
1851 /* Check if the descriptor is owned by the DMA */
1852 if (unlikely(status & tx_dma_own))
1853 break;
1854
1855 count++;
1856
1857 /* Make sure descriptor fields are read after reading
1858 * the own bit.
1859 */
1860 dma_rmb();
1861
1862 /* Just consider the last segment and ...*/
1863 if (likely(!(status & tx_not_ls))) {
1864 /* ... verify the status error condition */
1865 if (unlikely(status & tx_err)) {
1866 priv->dev->stats.tx_errors++;
1867 } else {
1868 priv->dev->stats.tx_packets++;
1869 priv->xstats.tx_pkt_n++;
1870 }
1871 stmmac_get_tx_hwtstamp(priv, p, skb);
1872 }
1873
1874 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1875 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1876 dma_unmap_page(priv->device,
1877 tx_q->tx_skbuff_dma[entry].buf,
1878 tx_q->tx_skbuff_dma[entry].len,
1879 DMA_TO_DEVICE);
1880 else
1881 dma_unmap_single(priv->device,
1882 tx_q->tx_skbuff_dma[entry].buf,
1883 tx_q->tx_skbuff_dma[entry].len,
1884 DMA_TO_DEVICE);
1885 tx_q->tx_skbuff_dma[entry].buf = 0;
1886 tx_q->tx_skbuff_dma[entry].len = 0;
1887 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1888 }
1889
1890 stmmac_clean_desc3(priv, tx_q, p);
1891
1892 tx_q->tx_skbuff_dma[entry].last_segment = false;
1893 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1894
1895 if (likely(skb != NULL)) {
1896 pkts_compl++;
1897 bytes_compl += skb->len;
1898 dev_consume_skb_any(skb);
1899 tx_q->tx_skbuff[entry] = NULL;
1900 }
1901
1902 stmmac_release_tx_desc(priv, p, priv->mode);
1903
1904 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1905 }
1906 tx_q->dirty_tx = entry;
1907
1908 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1909 pkts_compl, bytes_compl);
1910
1911 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1912 queue))) &&
1913 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1914
1915 netif_dbg(priv, tx_done, priv->dev,
1916 "%s: restart transmit\n", __func__);
1917 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1918 }
1919
1920 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1921 stmmac_enable_eee_mode(priv);
1922 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1923 }
1924
1925 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1926
1927 return count;
1928 }
1929
1930 /**
1931 * stmmac_tx_err - to manage the tx error
1932 * @priv: driver private structure
1933 * @chan: channel index
1934 * Description: it cleans the descriptors and restarts the transmission
1935 * in case of transmission errors.
1936 */
1937 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1938 {
1939 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1940 int i;
1941
1942 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1943
1944 stmmac_stop_tx_dma(priv, chan);
1945 dma_free_tx_skbufs(priv, chan);
1946 for (i = 0; i < DMA_TX_SIZE; i++)
1947 if (priv->extend_desc)
1948 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1949 priv->mode, (i == DMA_TX_SIZE - 1));
1950 else
1951 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1952 priv->mode, (i == DMA_TX_SIZE - 1));
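/* The ring is fully re-initialized: reset the software indices and the
 * cached MSS so that the next TSO frame reprograms the context
 * descriptor.
 */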
1953 tx_q->dirty_tx = 0;
1954 tx_q->cur_tx = 0;
1955 tx_q->mss = 0;
1956 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1957 stmmac_start_tx_dma(priv, chan);
1958
1959 priv->dev->stats.tx_errors++;
1960 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1961 }
1962
1963 /**
1964 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1965 * @priv: driver private structure
1966 * @txmode: TX operating mode
1967 * @rxmode: RX operating mode
1968 * @chan: channel index
1969 * Description: it is used for configuring the DMA operation mode at
1970 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1971 * mode.
1972 */
1973 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1974 u32 rxmode, u32 chan)
1975 {
1976 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1977 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1978 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1979 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1980 int rxfifosz = priv->plat->rx_fifo_size;
1981 int txfifosz = priv->plat->tx_fifo_size;
1982
1983 if (rxfifosz == 0)
1984 rxfifosz = priv->dma_cap.rx_fifo_size;
1985 if (txfifosz == 0)
1986 txfifosz = priv->dma_cap.tx_fifo_size;
1987
1988 /* Adjust for real per queue fifo size */
1989 rxfifosz /= rx_channels_count;
1990 txfifosz /= tx_channels_count;
1991
1992 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1993 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1994 }
1995
1996 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1997 {
1998 int ret;
1999
2000 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2001 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2002 if (ret && (ret != -EINVAL)) {
2003 stmmac_global_err(priv);
2004 return true;
2005 }
2006
2007 return false;
2008 }
2009
2010 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2011 {
2012 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2013 &priv->xstats, chan);
2014 struct stmmac_channel *ch = &priv->channel[chan];
2015 bool needs_work = false;
2016
2017 if ((status & handle_rx) && ch->has_rx) {
2018 needs_work = true;
2019 } else {
2020 status &= ~handle_rx;
2021 }
2022
2023 if ((status & handle_tx) && ch->has_tx) {
2024 needs_work = true;
2025 } else {
2026 status &= ~handle_tx;
2027 }
2028
2029 if (needs_work && napi_schedule_prep(&ch->napi)) {
2030 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2031 __napi_schedule(&ch->napi);
2032 }
2033
2034 return status;
2035 }
2036
2037 /**
2038 * stmmac_dma_interrupt - DMA ISR
2039 * @priv: driver private structure
2040 * Description: this is the DMA ISR. It is called by the main ISR.
2041 * It calls the dwmac dma routine and schedules the poll method when
2042 * there is work to be done.
2043 */
2044 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2045 {
2046 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2047 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2048 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2049 tx_channel_count : rx_channel_count;
2050 u32 chan;
2051 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2052
2053 /* Make sure we never check beyond our status buffer. */
2054 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2055 channels_to_check = ARRAY_SIZE(status);
2056
2057 for (chan = 0; chan < channels_to_check; chan++)
2058 status[chan] = stmmac_napi_check(priv, chan);
2059
2060 for (chan = 0; chan < tx_channel_count; chan++) {
2061 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2062 /* Try to bump up the dma threshold on this failure */
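/* The bump is done in steps of 64 until the value exceeds 256;
 * channels already running in SF mode are left untouched.
 */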
2063 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2064 (tc <= 256)) {
2065 tc += 64;
2066 if (priv->plat->force_thresh_dma_mode)
2067 stmmac_set_dma_operation_mode(priv,
2068 tc,
2069 tc,
2070 chan);
2071 else
2072 stmmac_set_dma_operation_mode(priv,
2073 tc,
2074 SF_DMA_MODE,
2075 chan);
2076 priv->xstats.threshold = tc;
2077 }
2078 } else if (unlikely(status[chan] == tx_hard_error)) {
2079 stmmac_tx_err(priv, chan);
2080 }
2081 }
2082 }
2083
2084 /**
2085 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2086 * @priv: driver private structure
2087 * Description: this masks the MMC irq; the counters are managed in SW.
2088 */
2089 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2090 {
2091 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2092 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2093
2094 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2095
2096 if (priv->dma_cap.rmon) {
2097 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2098 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2099 } else
2100 netdev_info(priv->dev, "No MAC Management Counters available\n");
2101 }
2102
2103 /**
2104 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2105 * @priv: driver private structure
2106 * Description:
2107 * new GMAC chip generations have a new register to indicate the
2108 * presence of the optional feature/functions.
2109 * This can also be used to override the value passed through the
2110 * platform and is necessary for old MAC10/100 and GMAC chips.
2111 */
2112 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2113 {
2114 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2115 }
2116
2117 /**
2118 * stmmac_check_ether_addr - check if the MAC addr is valid
2119 * @priv: driver private structure
2120 * Description:
2121 * it verifies that the MAC address is valid; in case of failure it
2122 * generates a random MAC address
2123 */
2124 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2125 {
2126 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2127 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2128 if (!is_valid_ether_addr(priv->dev->dev_addr))
2129 eth_hw_addr_random(priv->dev);
2130 netdev_info(priv->dev, "device MAC address %pM\n",
2131 priv->dev->dev_addr);
2132 }
2133 }
2134
2135 /**
2136 * stmmac_init_dma_engine - DMA init.
2137 * @priv: driver private structure
2138 * Description:
2139 * It inits the DMA invoking the specific MAC/GMAC callback.
2140 * Some DMA parameters can be passed from the platform;
2141 * if they are not passed, a default is used for the MAC or GMAC.
2142 */
2143 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2144 {
2145 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2146 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2147 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2148 struct stmmac_rx_queue *rx_q;
2149 struct stmmac_tx_queue *tx_q;
2150 u32 chan = 0;
2151 int atds = 0;
2152 int ret = 0;
2153
2154 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2155 dev_err(priv->device, "Invalid DMA configuration\n");
2156 return -EINVAL;
2157 }
2158
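/* Extended descriptors in ring mode require the alternate descriptor
 * size (atds) setting, which is passed to the DMA init callback below.
 */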
2159 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2160 atds = 1;
2161
2162 ret = stmmac_reset(priv, priv->ioaddr);
2163 if (ret) {
2164 dev_err(priv->device, "Failed to reset the dma\n");
2165 return ret;
2166 }
2167
2168 /* DMA Configuration */
2169 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2170
2171 if (priv->plat->axi)
2172 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2173
2174 /* DMA CSR Channel configuration */
2175 for (chan = 0; chan < dma_csr_ch; chan++)
2176 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2177
2178 /* DMA RX Channel Configuration */
2179 for (chan = 0; chan < rx_channels_count; chan++) {
2180 rx_q = &priv->rx_queue[chan];
2181
2182 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2183 rx_q->dma_rx_phy, chan);
2184
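/* The RX tail pointer is initially programmed to the end of the ring,
 * so all DMA_RX_SIZE descriptors are available to the DMA for
 * reception.
 */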
2185 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2186 (DMA_RX_SIZE * sizeof(struct dma_desc));
2187 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2188 rx_q->rx_tail_addr, chan);
2189 }
2190
2191 /* DMA TX Channel Configuration */
2192 for (chan = 0; chan < tx_channels_count; chan++) {
2193 tx_q = &priv->tx_queue[chan];
2194
2195 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2196 tx_q->dma_tx_phy, chan);
2197
2198 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2199 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2200 tx_q->tx_tail_addr, chan);
2201 }
2202
2203 return ret;
2204 }
2205
2206 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2207 {
2208 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2209
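/* (Re)arm the SW mitigation timer: if it expires before enough frames
 * accumulate to set the IC bit, the handler schedules NAPI to reclaim
 * the completed descriptors anyway.
 */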
2210 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2211 }
2212
2213 /**
2214 * stmmac_tx_timer - mitigation sw timer for tx.
2215 * @t: timer_list pointer
2216 * Description:
2217 * This is the timer handler that schedules NAPI to run the TX clean.
2218 */
2219 static void stmmac_tx_timer(struct timer_list *t)
2220 {
2221 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2222 struct stmmac_priv *priv = tx_q->priv_data;
2223 struct stmmac_channel *ch;
2224
2225 ch = &priv->channel[tx_q->queue_index];
2226
2227 if (likely(napi_schedule_prep(&ch->napi)))
2228 __napi_schedule(&ch->napi);
2229 }
2230
2231 /**
2232 * stmmac_init_tx_coalesce - init tx mitigation options.
2233 * @priv: driver private structure
2234 * Description:
2235 * This inits the transmit coalesce parameters: i.e. timer rate,
2236 * timer handler and default threshold used for enabling the
2237 * interrupt on completion bit.
2238 */
2239 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2240 {
2241 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2242 u32 chan;
2243
2244 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2245 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2246
2247 for (chan = 0; chan < tx_channel_count; chan++) {
2248 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2249
2250 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2251 }
2252 }
2253
2254 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2255 {
2256 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2257 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2258 u32 chan;
2259
2260 /* set TX ring length */
2261 for (chan = 0; chan < tx_channels_count; chan++)
2262 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2263 (DMA_TX_SIZE - 1), chan);
2264
2265 /* set RX ring length */
2266 for (chan = 0; chan < rx_channels_count; chan++)
2267 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2268 (DMA_RX_SIZE - 1), chan);
2269 }
2270
2271 /**
2272 * stmmac_set_tx_queue_weight - Set TX queue weight
2273 * @priv: driver private structure
2274 * Description: It is used for setting the TX queue weights
2275 */
2276 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2277 {
2278 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2279 u32 weight;
2280 u32 queue;
2281
2282 for (queue = 0; queue < tx_queues_count; queue++) {
2283 weight = priv->plat->tx_queues_cfg[queue].weight;
2284 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2285 }
2286 }
2287
2288 /**
2289 * stmmac_configure_cbs - Configure CBS in TX queue
2290 * @priv: driver private structure
2291 * Description: It is used for configuring CBS in AVB TX queues
2292 */
2293 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2294 {
2295 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2296 u32 mode_to_use;
2297 u32 queue;
2298
2299 /* queue 0 is reserved for legacy traffic */
2300 for (queue = 1; queue < tx_queues_count; queue++) {
2301 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
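/* CBS only applies to AVB queues; plain DCB queues are skipped. */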
2302 if (mode_to_use == MTL_QUEUE_DCB)
2303 continue;
2304
2305 stmmac_config_cbs(priv, priv->hw,
2306 priv->plat->tx_queues_cfg[queue].send_slope,
2307 priv->plat->tx_queues_cfg[queue].idle_slope,
2308 priv->plat->tx_queues_cfg[queue].high_credit,
2309 priv->plat->tx_queues_cfg[queue].low_credit,
2310 queue);
2311 }
2312 }
2313
2314 /**
2315 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2316 * @priv: driver private structure
2317 * Description: It is used for mapping RX queues to RX dma channels
2318 */
2319 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2320 {
2321 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322 u32 queue;
2323 u32 chan;
2324
2325 for (queue = 0; queue < rx_queues_count; queue++) {
2326 chan = priv->plat->rx_queues_cfg[queue].chan;
2327 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2328 }
2329 }
2330
2331 /**
2332 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2333 * @priv: driver private structure
2334 * Description: It is used for configuring the RX Queue Priority
2335 */
2336 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2337 {
2338 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2339 u32 queue;
2340 u32 prio;
2341
2342 for (queue = 0; queue < rx_queues_count; queue++) {
2343 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2344 continue;
2345
2346 prio = priv->plat->rx_queues_cfg[queue].prio;
2347 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2348 }
2349 }
2350
2351 /**
2352 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2353 * @priv: driver private structure
2354 * Description: It is used for configuring the TX Queue Priority
2355 */
2356 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2357 {
2358 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2359 u32 queue;
2360 u32 prio;
2361
2362 for (queue = 0; queue < tx_queues_count; queue++) {
2363 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2364 continue;
2365
2366 prio = priv->plat->tx_queues_cfg[queue].prio;
2367 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2368 }
2369 }
2370
2371 /**
2372 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2373 * @priv: driver private structure
2374 * Description: It is used for configuring the RX queue routing
2375 */
2376 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2377 {
2378 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2379 u32 queue;
2380 u8 packet;
2381
2382 for (queue = 0; queue < rx_queues_count; queue++) {
2383 /* no specific packet type routing specified for the queue */
2384 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2385 continue;
2386
2387 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2388 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2389 }
2390 }
2391
2392 /**
2393 * stmmac_mtl_configuration - Configure MTL
2394 * @priv: driver private structure
2395 * Description: It is used for configuring the MTL
2396 */
2397 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2398 {
2399 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2400 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2401
2402 if (tx_queues_count > 1)
2403 stmmac_set_tx_queue_weight(priv);
2404
2405 /* Configure MTL RX algorithms */
2406 if (rx_queues_count > 1)
2407 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2408 priv->plat->rx_sched_algorithm);
2409
2410 /* Configure MTL TX algorithms */
2411 if (tx_queues_count > 1)
2412 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2413 priv->plat->tx_sched_algorithm);
2414
2415 /* Configure CBS in AVB TX queues */
2416 if (tx_queues_count > 1)
2417 stmmac_configure_cbs(priv);
2418
2419 /* Map RX MTL to DMA channels */
2420 stmmac_rx_queue_dma_chan_map(priv);
2421
2422 /* Enable MAC RX Queues */
2423 stmmac_mac_enable_rx_queues(priv);
2424
2425 /* Set RX priorities */
2426 if (rx_queues_count > 1)
2427 stmmac_mac_config_rx_queues_prio(priv);
2428
2429 /* Set TX priorities */
2430 if (tx_queues_count > 1)
2431 stmmac_mac_config_tx_queues_prio(priv);
2432
2433 /* Set RX routing */
2434 if (rx_queues_count > 1)
2435 stmmac_mac_config_rx_queues_routing(priv);
2436 }
2437
2438 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2439 {
2440 if (priv->dma_cap.asp) {
2441 netdev_info(priv->dev, "Enabling Safety Features\n");
2442 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2443 } else {
2444 netdev_info(priv->dev, "No Safety Features support found\n");
2445 }
2446 }
2447
2448 /**
2449 * stmmac_hw_setup - set up the MAC in a usable state.
2450 * @dev : pointer to the device structure.
2451 * Description:
2452 * this is the main function to set up the HW in a usable state: the
2453 * DMA engine is reset, the core registers are configured (e.g. AXI,
2454 * Checksum features, timers) and, at the end, the DMA is ready to
2455 * start receiving and transmitting.
2456 * Return value:
2457 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2458 * file on failure.
2459 */
2460 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2461 {
2462 struct stmmac_priv *priv = netdev_priv(dev);
2463 u32 rx_cnt = priv->plat->rx_queues_to_use;
2464 u32 tx_cnt = priv->plat->tx_queues_to_use;
2465 u32 chan;
2466 int ret;
2467
2468 /* DMA initialization and SW reset */
2469 ret = stmmac_init_dma_engine(priv);
2470 if (ret < 0) {
2471 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2472 __func__);
2473 return ret;
2474 }
2475
2476 /* Copy the MAC addr into the HW */
2477 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2478
2479 /* PS and related bits will be programmed according to the speed */
2480 if (priv->hw->pcs) {
2481 int speed = priv->plat->mac_port_sel_speed;
2482
2483 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2484 (speed == SPEED_1000)) {
2485 priv->hw->ps = speed;
2486 } else {
2487 dev_warn(priv->device, "invalid port speed\n");
2488 priv->hw->ps = 0;
2489 }
2490 }
2491
2492 /* Initialize the MAC Core */
2493 stmmac_core_init(priv, priv->hw, dev);
2494
2495 /* Initialize MTL*/
2496 stmmac_mtl_configuration(priv);
2497
2498 /* Initialize Safety Features */
2499 stmmac_safety_feat_configuration(priv);
2500
2501 ret = stmmac_rx_ipc(priv, priv->hw);
2502 if (!ret) {
2503 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2504 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2505 priv->hw->rx_csum = 0;
2506 }
2507
2508 /* Enable the MAC Rx/Tx */
2509 stmmac_mac_set(priv, priv->ioaddr, true);
2510
2511 /* Set the HW DMA mode and the COE */
2512 stmmac_dma_operation_mode(priv);
2513
2514 stmmac_mmc_setup(priv);
2515
2516 if (init_ptp) {
2517 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2518 if (ret < 0)
2519 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2520
2521 ret = stmmac_init_ptp(priv);
2522 if (ret == -EOPNOTSUPP)
2523 netdev_warn(priv->dev, "PTP not supported by HW\n");
2524 else if (ret)
2525 netdev_warn(priv->dev, "PTP init failed\n");
2526 }
2527
2528 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2529
2530 if (priv->use_riwt) {
2531 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2532 if (!ret)
2533 priv->rx_riwt = MAX_DMA_RIWT;
2534 }
2535
2536 if (priv->hw->pcs)
2537 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2538
2539 /* set TX and RX rings length */
2540 stmmac_set_rings_length(priv);
2541
2542 /* Enable TSO */
2543 if (priv->tso) {
2544 for (chan = 0; chan < tx_cnt; chan++)
2545 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2546 }
2547
2548 /* Start the ball rolling... */
2549 stmmac_start_all_dma(priv);
2550
2551 return 0;
2552 }
2553
2554 static void stmmac_hw_teardown(struct net_device *dev)
2555 {
2556 struct stmmac_priv *priv = netdev_priv(dev);
2557
2558 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2559 }
2560
2561 /**
2562 * stmmac_open - open entry point of the driver
2563 * @dev : pointer to the device structure.
2564 * Description:
2565 * This function is the open entry point of the driver.
2566 * Return value:
2567 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2568 * file on failure.
2569 */
2570 static int stmmac_open(struct net_device *dev)
2571 {
2572 struct stmmac_priv *priv = netdev_priv(dev);
2573 u32 chan;
2574 int ret;
2575
2576 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2577 priv->hw->pcs != STMMAC_PCS_TBI &&
2578 priv->hw->pcs != STMMAC_PCS_RTBI) {
2579 ret = stmmac_init_phy(dev);
2580 if (ret) {
2581 netdev_err(priv->dev,
2582 "%s: Cannot attach to PHY (error: %d)\n",
2583 __func__, ret);
2584 return ret;
2585 }
2586 }
2587
2588 /* Extra statistics */
2589 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2590 priv->xstats.threshold = tc;
2591
2592 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2593 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2594
2595 ret = alloc_dma_desc_resources(priv);
2596 if (ret < 0) {
2597 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2598 __func__);
2599 goto dma_desc_error;
2600 }
2601
2602 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2603 if (ret < 0) {
2604 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2605 __func__);
2606 goto init_error;
2607 }
2608
2609 ret = stmmac_hw_setup(dev, true);
2610 if (ret < 0) {
2611 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2612 goto init_error;
2613 }
2614
2615 stmmac_init_tx_coalesce(priv);
2616
2617 if (dev->phydev)
2618 phy_start(dev->phydev);
2619
2620 /* Request the IRQ lines */
2621 ret = request_irq(dev->irq, stmmac_interrupt,
2622 IRQF_SHARED, dev->name, dev);
2623 if (unlikely(ret < 0)) {
2624 netdev_err(priv->dev,
2625 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2626 __func__, dev->irq, ret);
2627 goto irq_error;
2628 }
2629
2630 /* Request the Wake IRQ in case of another line is used for WoL */
2631 if (priv->wol_irq != dev->irq) {
2632 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2633 IRQF_SHARED, dev->name, dev);
2634 if (unlikely(ret < 0)) {
2635 netdev_err(priv->dev,
2636 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2637 __func__, priv->wol_irq, ret);
2638 goto wolirq_error;
2639 }
2640 }
2641
2642 /* Request the IRQ lines */
2643 if (priv->lpi_irq > 0) {
2644 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2645 dev->name, dev);
2646 if (unlikely(ret < 0)) {
2647 netdev_err(priv->dev,
2648 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2649 __func__, priv->lpi_irq, ret);
2650 goto lpiirq_error;
2651 }
2652 }
2653
2654 stmmac_enable_all_queues(priv);
2655 netif_tx_start_all_queues(priv->dev);
2656
2657 return 0;
2658
2659 lpiirq_error:
2660 if (priv->wol_irq != dev->irq)
2661 free_irq(priv->wol_irq, dev);
2662 wolirq_error:
2663 free_irq(dev->irq, dev);
2664 irq_error:
2665 if (dev->phydev)
2666 phy_stop(dev->phydev);
2667
2668 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2669 del_timer_sync(&priv->tx_queue[chan].txtimer);
2670
2671 stmmac_hw_teardown(dev);
2672 init_error:
2673 free_dma_desc_resources(priv);
2674 dma_desc_error:
2675 if (dev->phydev)
2676 phy_disconnect(dev->phydev);
2677
2678 return ret;
2679 }
2680
2681 /**
2682 * stmmac_release - close entry point of the driver
2683 * @dev : device pointer.
2684 * Description:
2685 * This is the stop entry point of the driver.
2686 */
2687 static int stmmac_release(struct net_device *dev)
2688 {
2689 struct stmmac_priv *priv = netdev_priv(dev);
2690 u32 chan;
2691
2692 if (priv->eee_enabled)
2693 del_timer_sync(&priv->eee_ctrl_timer);
2694
2695 /* Stop and disconnect the PHY */
2696 if (dev->phydev) {
2697 phy_stop(dev->phydev);
2698 phy_disconnect(dev->phydev);
2699 }
2700
2701 stmmac_disable_all_queues(priv);
2702
2703 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2704 del_timer_sync(&priv->tx_queue[chan].txtimer);
2705
2706 /* Free the IRQ lines */
2707 free_irq(dev->irq, dev);
2708 if (priv->wol_irq != dev->irq)
2709 free_irq(priv->wol_irq, dev);
2710 if (priv->lpi_irq > 0)
2711 free_irq(priv->lpi_irq, dev);
2712
2713 /* Stop TX/RX DMA and clear the descriptors */
2714 stmmac_stop_all_dma(priv);
2715
2716 /* Release and free the Rx/Tx resources */
2717 free_dma_desc_resources(priv);
2718
2719 /* Disable the MAC Rx/Tx */
2720 stmmac_mac_set(priv, priv->ioaddr, false);
2721
2722 netif_carrier_off(dev);
2723
2724 stmmac_release_ptp(priv);
2725
2726 return 0;
2727 }
2728
2729 /**
2730 * stmmac_tso_allocator - fill TX descriptors for a TSO buffer
2731 * @priv: driver private structure
2732 * @des: buffer start address
2733 * @total_len: total length to fill in descriptors
2734 * @last_segment: condition for the last descriptor
2735 * @queue: TX queue index
2736 * Description:
2737 * This function fills the descriptors and requests new descriptors
2738 * according to the buffer length to fill
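 *
 * E.g. (numbers for illustration only): a 40000 byte chunk is spread over
 * descriptors of 16383, 16383 and 7234 bytes, since each buffer is capped
 * at TSO_MAX_BUFF_SIZE.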
2739 */
2740 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2741 int total_len, bool last_segment, u32 queue)
2742 {
2743 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2744 struct dma_desc *desc;
2745 u32 buff_size;
2746 int tmp_len;
2747
2748 tmp_len = total_len;
2749
2750 while (tmp_len > 0) {
2751 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2752 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2753 desc = tx_q->dma_tx + tx_q->cur_tx;
2754
2755 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2756 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2757 TSO_MAX_BUFF_SIZE : tmp_len;
2758
2759 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2760 0, 1,
2761 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2762 0, 0);
2763
2764 tmp_len -= TSO_MAX_BUFF_SIZE;
2765 }
2766 }
2767
2768 /**
2769 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2770 * @skb : the socket buffer
2771 * @dev : device pointer
2772 * Description: this is the transmit function that is called on TSO frames
2773 * (support available on GMAC4 and newer chips).
2774 * The diagram below shows the ring programming in case of TSO frames:
2775 *
2776 * First Descriptor
2777 * --------
2778 * | DES0 |---> buffer1 = L2/L3/L4 header
2779 * | DES1 |---> TCP Payload (can continue on next descr...)
2780 * | DES2 |---> buffer 1 and 2 len
2781 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2782 * --------
2783 * |
2784 * ...
2785 * |
2786 * --------
2787 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2788 * | DES1 | --|
2789 * | DES2 | --> buffer 1 and 2 len
2790 * | DES3 |
2791 * --------
2792 *
2793 * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be reprogrammed.
2794 */
2795 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2796 {
2797 struct dma_desc *desc, *first, *mss_desc = NULL;
2798 struct stmmac_priv *priv = netdev_priv(dev);
2799 int nfrags = skb_shinfo(skb)->nr_frags;
2800 u32 queue = skb_get_queue_mapping(skb);
2801 unsigned int first_entry, des;
2802 struct stmmac_tx_queue *tx_q;
2803 int tmp_pay_len = 0;
2804 u32 pay_len, mss;
2805 u8 proto_hdr_len;
2806 int i;
2807
2808 tx_q = &priv->tx_queue[queue];
2809
2810 /* Compute header lengths */
2811 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2812
2813 /* Desc availability based on threshold should be safe enough */
2814 if (unlikely(stmmac_tx_avail(priv, queue) <
2815 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2816 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2817 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2818 queue));
2819 /* This is a hard error, log it. */
2820 netdev_err(priv->dev,
2821 "%s: Tx Ring full when queue awake\n",
2822 __func__);
2823 }
2824 return NETDEV_TX_BUSY;
2825 }
2826
2827 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2828
2829 mss = skb_shinfo(skb)->gso_size;
2830
2831 /* set new MSS value if needed */
2832 if (mss != tx_q->mss) {
2833 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2834 stmmac_set_mss(priv, mss_desc, mss);
2835 tx_q->mss = mss;
2836 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2837 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2838 }
2839
2840 if (netif_msg_tx_queued(priv)) {
2841 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2842 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2843 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2844 skb->data_len);
2845 }
2846
2847 first_entry = tx_q->cur_tx;
2848 WARN_ON(tx_q->tx_skbuff[first_entry]);
2849
2850 desc = tx_q->dma_tx + first_entry;
2851 first = desc;
2852
2853 /* first descriptor: fill Headers on Buf1 */
2854 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2855 DMA_TO_DEVICE);
2856 if (dma_mapping_error(priv->device, des))
2857 goto dma_map_err;
2858
2859 tx_q->tx_skbuff_dma[first_entry].buf = des;
2860 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2861
2862 first->des0 = cpu_to_le32(des);
2863
2864 /* Fill start of payload in buff2 of first descriptor */
2865 if (pay_len)
2866 first->des1 = cpu_to_le32(des + proto_hdr_len);
2867
2868 /* If needed take extra descriptors to fill the remaining payload */
2869 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2870
2871 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2872
2873 /* Prepare fragments */
2874 for (i = 0; i < nfrags; i++) {
2875 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2876
2877 des = skb_frag_dma_map(priv->device, frag, 0,
2878 skb_frag_size(frag),
2879 DMA_TO_DEVICE);
2880 if (dma_mapping_error(priv->device, des))
2881 goto dma_map_err;
2882
2883 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2884 (i == nfrags - 1), queue);
2885
2886 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2887 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2888 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2889 }
2890
2891 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2892
2893 /* Only the last descriptor gets to point to the skb. */
2894 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2895
2896 /* We've used all descriptors we need for this skb, however,
2897 * advance cur_tx so that it references a fresh descriptor.
2898 * ndo_start_xmit will fill this descriptor the next time it's
2899 * called and stmmac_tx_clean may clean up to this descriptor.
2900 */
2901 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2902
2903 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2904 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2905 __func__);
2906 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2907 }
2908
2909 dev->stats.tx_bytes += skb->len;
2910 priv->xstats.tx_tso_frames++;
2911 priv->xstats.tx_tso_nfrags += nfrags;
2912
2913 /* Manage tx mitigation */
2914 tx_q->tx_count_frames += nfrags + 1;
2915 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2916 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2917 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2918 priv->hwts_tx_en)) {
2919 stmmac_tx_timer_arm(priv, queue);
2920 } else {
2921 tx_q->tx_count_frames = 0;
2922 stmmac_set_tx_ic(priv, desc);
2923 priv->xstats.tx_set_ic_bit++;
2924 }
2925
2926 skb_tx_timestamp(skb);
2927
2928 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2929 priv->hwts_tx_en)) {
2930 /* declare that device is doing timestamping */
2931 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2932 stmmac_enable_tx_timestamp(priv, first);
2933 }
2934
2935 /* Complete the first descriptor before granting the DMA */
2936 stmmac_prepare_tso_tx_desc(priv, first, 1,
2937 proto_hdr_len,
2938 pay_len,
2939 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2940 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2941
2942 /* If context desc is used to change MSS */
2943 if (mss_desc) {
2944 /* Make sure that first descriptor has been completely
2945 * written, including its own bit. This is because MSS is
2946 * actually before first descriptor, so we need to make
2947 * sure that MSS's own bit is the last thing written.
2948 */
2949 dma_wmb();
2950 stmmac_set_tx_owner(priv, mss_desc);
2951 }
2952
2953 /* The own bit must be the latest setting done when preparing the
2954 * descriptor, and then a barrier is needed to make sure that
2955 * all is coherent before granting the DMA engine.
2956 */
2957 wmb();
2958
2959 if (netif_msg_pktdata(priv)) {
2960 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2961 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2962 tx_q->cur_tx, first, nfrags);
2963
2964 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2965
2966 pr_info(">>> frame to be transmitted: ");
2967 print_pkt(skb->data, skb_headlen(skb));
2968 }
2969
2970 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2971
2972 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2973 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2974 stmmac_tx_timer_arm(priv, queue);
2975
2976 return NETDEV_TX_OK;
2977
2978 dma_map_err:
2979 dev_err(priv->device, "Tx dma map failed\n");
2980 dev_kfree_skb(skb);
2981 priv->dev->stats.tx_dropped++;
2982 return NETDEV_TX_OK;
2983 }
2984
2985 /**
2986 * stmmac_xmit - Tx entry point of the driver
2987 * @skb : the socket buffer
2988 * @dev : device pointer
2989 * Description : this is the tx entry point of the driver.
2990 * It programs the chain or the ring and supports oversized frames
2991 * and the SG feature.
2992 */
2993 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2994 {
2995 struct stmmac_priv *priv = netdev_priv(dev);
2996 unsigned int nopaged_len = skb_headlen(skb);
2997 int i, csum_insertion = 0, is_jumbo = 0;
2998 u32 queue = skb_get_queue_mapping(skb);
2999 int nfrags = skb_shinfo(skb)->nr_frags;
3000 int entry;
3001 unsigned int first_entry;
3002 struct dma_desc *desc, *first;
3003 struct stmmac_tx_queue *tx_q;
3004 unsigned int enh_desc;
3005 unsigned int des;
3006
3007 tx_q = &priv->tx_queue[queue];
3008
3009 if (priv->tx_path_in_lpi_mode)
3010 stmmac_disable_eee_mode(priv);
3011
3012 /* Manage oversized TCP frames for GMAC4 device */
3013 if (skb_is_gso(skb) && priv->tso) {
3014 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3015 return stmmac_tso_xmit(skb, dev);
3016 }
3017
3018 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3019 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3020 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3021 queue));
3022 /* This is a hard error, log it. */
3023 netdev_err(priv->dev,
3024 "%s: Tx Ring full when queue awake\n",
3025 __func__);
3026 }
3027 return NETDEV_TX_BUSY;
3028 }
3029
3030 entry = tx_q->cur_tx;
3031 first_entry = entry;
3032 WARN_ON(tx_q->tx_skbuff[first_entry]);
3033
3034 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3035
3036 if (likely(priv->extend_desc))
3037 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3038 else
3039 desc = tx_q->dma_tx + entry;
3040
3041 first = desc;
3042
3043 enh_desc = priv->plat->enh_desc;
3044 /* To program the descriptors according to the size of the frame */
3045 if (enh_desc)
3046 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3047
3048 if (unlikely(is_jumbo)) {
3049 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3050 if (unlikely(entry < 0) && (entry != -EINVAL))
3051 goto dma_map_err;
3052 }
3053
3054 for (i = 0; i < nfrags; i++) {
3055 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3056 int len = skb_frag_size(frag);
3057 bool last_segment = (i == (nfrags - 1));
3058
3059 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3060 WARN_ON(tx_q->tx_skbuff[entry]);
3061
3062 if (likely(priv->extend_desc))
3063 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3064 else
3065 desc = tx_q->dma_tx + entry;
3066
3067 des = skb_frag_dma_map(priv->device, frag, 0, len,
3068 DMA_TO_DEVICE);
3069 if (dma_mapping_error(priv->device, des))
3070 goto dma_map_err; /* should reuse desc w/o issues */
3071
3072 tx_q->tx_skbuff_dma[entry].buf = des;
3073
3074 stmmac_set_desc_addr(priv, desc, des);
3075
3076 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3077 tx_q->tx_skbuff_dma[entry].len = len;
3078 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3079
3080 /* Prepare the descriptor and set the own bit too */
3081 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3082 priv->mode, 1, last_segment, skb->len);
3083 }
3084
3085 /* Only the last descriptor gets to point to the skb. */
3086 tx_q->tx_skbuff[entry] = skb;
3087
3088 /* We've used all descriptors we need for this skb, however,
3089 * advance cur_tx so that it references a fresh descriptor.
3090 * ndo_start_xmit will fill this descriptor the next time it's
3091 * called and stmmac_tx_clean may clean up to this descriptor.
3092 */
3093 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3094 tx_q->cur_tx = entry;
3095
3096 if (netif_msg_pktdata(priv)) {
3097 void *tx_head;
3098
3099 netdev_dbg(priv->dev,
3100 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3101 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3102 entry, first, nfrags);
3103
3104 if (priv->extend_desc)
3105 tx_head = (void *)tx_q->dma_etx;
3106 else
3107 tx_head = (void *)tx_q->dma_tx;
3108
3109 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3110
3111 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3112 print_pkt(skb->data, skb->len);
3113 }
3114
3115 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3116 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3117 __func__);
3118 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3119 }
3120
3121 dev->stats.tx_bytes += skb->len;
3122
3123 /* According to the coalesce parameter the IC bit for the latest
3124 * segment is reset and the timer re-started to clean the tx status.
3125 * This approach takes care of the fragments: desc is the first
3126 * element in case of no SG.
3127 */
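/* E.g. with a coalesce threshold of 25 frames the IC bit is set roughly
 * once every 25 queued frames and the SW timer reclaims the rest;
 * HW-timestamped frames on GMAC4 and newer always get the IC bit.
 */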
3128 tx_q->tx_count_frames += nfrags + 1;
3129 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3130 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3131 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3132 priv->hwts_tx_en)) {
3133 stmmac_tx_timer_arm(priv, queue);
3134 } else {
3135 tx_q->tx_count_frames = 0;
3136 stmmac_set_tx_ic(priv, desc);
3137 priv->xstats.tx_set_ic_bit++;
3138 }
3139
3140 skb_tx_timestamp(skb);
3141
3142 /* Ready to fill the first descriptor and set the OWN bit w/o any
3143 * problems because all the descriptors are actually ready to be
3144 * passed to the DMA engine.
3145 */
3146 if (likely(!is_jumbo)) {
3147 bool last_segment = (nfrags == 0);
3148
3149 des = dma_map_single(priv->device, skb->data,
3150 nopaged_len, DMA_TO_DEVICE);
3151 if (dma_mapping_error(priv->device, des))
3152 goto dma_map_err;
3153
3154 tx_q->tx_skbuff_dma[first_entry].buf = des;
3155
3156 stmmac_set_desc_addr(priv, first, des);
3157
3158 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3159 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3160
3161 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3162 priv->hwts_tx_en)) {
3163 /* declare that device is doing timestamping */
3164 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3165 stmmac_enable_tx_timestamp(priv, first);
3166 }
3167
3168 /* Prepare the first descriptor setting the OWN bit too */
3169 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3170 csum_insertion, priv->mode, 1, last_segment,
3171 skb->len);
3172 } else {
3173 stmmac_set_tx_owner(priv, first);
3174 }
3175
3176 /* The own bit must be the latest setting done when preparing the
3177 * descriptor, and then a barrier is needed to make sure that
3178 * all is coherent before granting the DMA engine.
3179 */
3180 wmb();
3181
3182 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3183
3184 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3185
3186 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3187 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3188 stmmac_tx_timer_arm(priv, queue);
3189
3190 return NETDEV_TX_OK;
3191
3192 dma_map_err:
3193 netdev_err(priv->dev, "Tx DMA map failed\n");
3194 dev_kfree_skb(skb);
3195 priv->dev->stats.tx_dropped++;
3196 return NETDEV_TX_OK;
3197 }
3198
3199 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3200 {
3201 struct vlan_ethhdr *veth;
3202 __be16 vlan_proto;
3203 u16 vlanid;
3204
3205 veth = (struct vlan_ethhdr *)skb->data;
3206 vlan_proto = veth->h_vlan_proto;
3207
3208 if ((vlan_proto == htons(ETH_P_8021Q) &&
3209 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3210 (vlan_proto == htons(ETH_P_8021AD) &&
3211 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3212 /* pop the vlan tag */
3213 vlanid = ntohs(veth->h_vlan_TCI);
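		/* Shift the MAC addresses 4 bytes towards the tag so they sit
		 * right in front of the encapsulated ethertype, then pull the
		 * now stale leading 4 bytes.
		 */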
3214 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3215 skb_pull(skb, VLAN_HLEN);
3216 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3217 }
3218 }
3219
3220
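/* Returns 1 once the zero-copy threshold has been reached (e.g. after an
 * skb allocation failure in the refill path), telling stmmac_rx() to copy
 * the frame instead of handing the DMA buffer to the stack.
 */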
3221 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3222 {
3223 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3224 return 0;
3225
3226 return 1;
3227 }
3228
3229 /**
3230 * stmmac_rx_refill - refill used skb preallocated buffers
3231 * @priv: driver private structure
3232 * @queue: RX queue index
3233 * Description : this reallocates the skbs for the reception process,
3234 * which is based on zero-copy.
3235 */
3236 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3237 {
3238 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3239 int dirty = stmmac_rx_dirty(priv, queue);
3240 unsigned int entry = rx_q->dirty_rx;
3241
3242 int bfsize = priv->dma_buf_sz;
3243
3244 while (dirty-- > 0) {
3245 struct dma_desc *p;
3246
3247 if (priv->extend_desc)
3248 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3249 else
3250 p = rx_q->dma_rx + entry;
3251
3252 if (likely(!rx_q->rx_skbuff[entry])) {
3253 struct sk_buff *skb;
3254
3255 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3256 if (unlikely(!skb)) {
3257 /* so for a while no zero-copy! */
3258 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3259 if (unlikely(net_ratelimit()))
3260 dev_err(priv->device,
3261 "fail to alloc skb entry %d\n",
3262 entry);
3263 break;
3264 }
3265
3266 rx_q->rx_skbuff[entry] = skb;
3267 rx_q->rx_skbuff_dma[entry] =
3268 dma_map_single(priv->device, skb->data, bfsize,
3269 DMA_FROM_DEVICE);
3270 if (dma_mapping_error(priv->device,
3271 rx_q->rx_skbuff_dma[entry])) {
3272 netdev_err(priv->dev, "Rx DMA map failed\n");
3273 dev_kfree_skb(skb);
3274 break;
3275 }
3276
3277 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3278 stmmac_refill_desc3(priv, rx_q, p);
3279
3280 if (rx_q->rx_zeroc_thresh > 0)
3281 rx_q->rx_zeroc_thresh--;
3282
3283 netif_dbg(priv, rx_status, priv->dev,
3284 "refill entry #%d\n", entry);
3285 }
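		/* Make sure the new buffer address is written to the
		 * descriptor before the OWN bit is handed back to the DMA
		 * below.
		 */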
3286 dma_wmb();
3287
3288 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3289
3290 dma_wmb();
3291
3292 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3293 }
3294 rx_q->dirty_rx = entry;
3295 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3296 }
3297
3298 /**
3299 * stmmac_rx - manage the receive process
3300 * @priv: driver private structure
3301 * @limit: napi budget
3302 * @queue: RX queue index.
3303 * Description : this is the function called by the napi poll method.
3304 * It gets all the frames inside the ring.
3305 */
3306 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3307 {
3308 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3309 struct stmmac_channel *ch = &priv->channel[queue];
3310 unsigned int next_entry = rx_q->cur_rx;
3311 int coe = priv->hw->rx_csum;
3312 unsigned int count = 0;
3313 bool xmac;
3314
3315 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3316
3317 if (netif_msg_rx_status(priv)) {
3318 void *rx_head;
3319
3320 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3321 if (priv->extend_desc)
3322 rx_head = (void *)rx_q->dma_erx;
3323 else
3324 rx_head = (void *)rx_q->dma_rx;
3325
3326 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3327 }
3328 while (count < limit) {
3329 int entry, status;
3330 struct dma_desc *p;
3331 struct dma_desc *np;
3332
3333 entry = next_entry;
3334
3335 if (priv->extend_desc)
3336 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3337 else
3338 p = rx_q->dma_rx + entry;
3339
3340 /* read the status of the incoming frame */
3341 status = stmmac_rx_status(priv, &priv->dev->stats,
3342 &priv->xstats, p);
3343 /* check if managed by the DMA otherwise go ahead */
3344 if (unlikely(status & dma_own))
3345 break;
3346
3347 count++;
3348
3349 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3350 next_entry = rx_q->cur_rx;
3351
3352 if (priv->extend_desc)
3353 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3354 else
3355 np = rx_q->dma_rx + next_entry;
3356
3357 prefetch(np);
3358
3359 if (priv->extend_desc)
3360 stmmac_rx_extended_status(priv, &priv->dev->stats,
3361 &priv->xstats, rx_q->dma_erx + entry);
3362 if (unlikely(status == discard_frame)) {
3363 priv->dev->stats.rx_errors++;
3364 if (priv->hwts_rx_en && !priv->extend_desc) {
3365 /* DESC2 & DESC3 will be overwritten by device
3366 * with timestamp value, hence reinitialize
3367 * them in stmmac_rx_refill() function so that
3368 * device can reuse it.
3369 */
3370 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3371 rx_q->rx_skbuff[entry] = NULL;
3372 dma_unmap_single(priv->device,
3373 rx_q->rx_skbuff_dma[entry],
3374 priv->dma_buf_sz,
3375 DMA_FROM_DEVICE);
3376 }
3377 } else {
3378 struct sk_buff *skb;
3379 int frame_len;
3380 unsigned int des;
3381
3382 stmmac_get_desc_addr(priv, p, &des);
3383 frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3384
3385 /* If frame length is greater than skb buffer size
3386 * (preallocated during init) then the packet is
3387 * ignored
3388 */
3389 if (frame_len > priv->dma_buf_sz) {
3390 if (net_ratelimit())
3391 netdev_err(priv->dev,
3392 "len %d larger than size (%d)\n",
3393 frame_len, priv->dma_buf_sz);
3394 priv->dev->stats.rx_length_errors++;
3395 continue;
3396 }
3397
3398 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3399 * Type frames (LLC/LLC-SNAP)
3400 *
3401 * llc_snap is never checked in GMAC >= 4, so this ACS
3402 * feature is always disabled and packets need to be
3403 * stripped manually.
3404 */
3405 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3406 unlikely(status != llc_snap))
3407 frame_len -= ETH_FCS_LEN;
3408
3409 if (netif_msg_rx_status(priv)) {
3410 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3411 p, entry, des);
3412 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3413 frame_len, status);
3414 }
3415
3416 /* The zero-copy path is always used, for all sizes,
3417 * in case of GMAC4 because the used descriptors
3418 * always need to be refilled.
3419 */
3420 if (unlikely(!xmac &&
3421 ((frame_len < priv->rx_copybreak) ||
3422 stmmac_rx_threshold_count(rx_q)))) {
3423 skb = netdev_alloc_skb_ip_align(priv->dev,
3424 frame_len);
3425 if (unlikely(!skb)) {
3426 if (net_ratelimit())
3427 dev_warn(priv->device,
3428 "packet dropped\n");
3429 priv->dev->stats.rx_dropped++;
3430 continue;
3431 }
3432
3433 dma_sync_single_for_cpu(priv->device,
3434 rx_q->rx_skbuff_dma
3435 [entry], frame_len,
3436 DMA_FROM_DEVICE);
3437 skb_copy_to_linear_data(skb,
3438 rx_q->
3439 rx_skbuff[entry]->data,
3440 frame_len);
3441
3442 skb_put(skb, frame_len);
3443 dma_sync_single_for_device(priv->device,
3444 rx_q->rx_skbuff_dma
3445 [entry], frame_len,
3446 DMA_FROM_DEVICE);
3447 } else {
3448 skb = rx_q->rx_skbuff[entry];
3449 if (unlikely(!skb)) {
3450 if (net_ratelimit())
3451 netdev_err(priv->dev,
3452 "%s: Inconsistent Rx chain\n",
3453 priv->dev->name);
3454 priv->dev->stats.rx_dropped++;
3455 continue;
3456 }
3457 prefetch(skb->data - NET_IP_ALIGN);
3458 rx_q->rx_skbuff[entry] = NULL;
3459 rx_q->rx_zeroc_thresh++;
3460
3461 skb_put(skb, frame_len);
3462 dma_unmap_single(priv->device,
3463 rx_q->rx_skbuff_dma[entry],
3464 priv->dma_buf_sz,
3465 DMA_FROM_DEVICE);
3466 }
3467
3468 if (netif_msg_pktdata(priv)) {
3469 netdev_dbg(priv->dev, "frame received (%dbytes)",
3470 frame_len);
3471 print_pkt(skb->data, frame_len);
3472 }
3473
3474 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3475
3476 stmmac_rx_vlan(priv->dev, skb);
3477
3478 skb->protocol = eth_type_trans(skb, priv->dev);
3479
3480 if (unlikely(!coe))
3481 skb_checksum_none_assert(skb);
3482 else
3483 skb->ip_summed = CHECKSUM_UNNECESSARY;
3484
3485 napi_gro_receive(&ch->napi, skb);
3486
3487 priv->dev->stats.rx_packets++;
3488 priv->dev->stats.rx_bytes += frame_len;
3489 }
3490 }
3491
3492 stmmac_rx_refill(priv, queue);
3493
3494 priv->xstats.rx_pkt_n += count;
3495
3496 return count;
3497 }
3498
3499 /**
3500 * stmmac_napi_poll - stmmac poll method (NAPI)
3501 * @napi : pointer to the napi structure.
3502 * @budget : maximum number of packets that the current CPU can receive from
3503 * all interfaces.
3504 * Description :
3505 * To look at the incoming frames and clear the tx resources.
3506 */
3507 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3508 {
3509 struct stmmac_channel *ch =
3510 container_of(napi, struct stmmac_channel, napi);
3511 struct stmmac_priv *priv = ch->priv_data;
3512 int work_done, rx_done = 0, tx_done = 0;
3513 u32 chan = ch->index;
3514
3515 priv->xstats.napi_poll++;
3516
3517 if (ch->has_tx)
3518 tx_done = stmmac_tx_clean(priv, budget, chan);
3519 if (ch->has_rx)
3520 rx_done = stmmac_rx(priv, budget, chan);
3521
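	/* Report the larger of the RX and TX work, capped at the NAPI
	 * budget, so that polling continues while either direction still
	 * has pending work.
	 */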
3522 work_done = max(rx_done, tx_done);
3523 work_done = min(work_done, budget);
3524
3525 if (work_done < budget && napi_complete_done(napi, work_done)) {
3526 int stat;
3527
3528 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3529 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3530 &priv->xstats, chan);
3531 if (stat && napi_reschedule(napi))
3532 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3533 }
3534
3535 return work_done;
3536 }
3537
3538 /**
3539 * stmmac_tx_timeout
3540 * @dev : Pointer to net device structure
3541 * Description: this function is called when a packet transmission fails to
3542 * complete within a reasonable time. The driver will mark the error in the
3543 * netdev structure and arrange for the device to be reset to a sane state
3544 * in order to transmit a new packet.
3545 */
3546 static void stmmac_tx_timeout(struct net_device *dev)
3547 {
3548 struct stmmac_priv *priv = netdev_priv(dev);
3549
3550 stmmac_global_err(priv);
3551 }
3552
3553 /**
3554 * stmmac_set_rx_mode - entry point for multicast addressing
3555 * @dev : pointer to the device structure
3556 * Description:
3557 * This function is a driver entry point which gets called by the kernel
3558 * whenever multicast addresses must be enabled/disabled.
3559 * Return value:
3560 * void.
3561 */
3562 static void stmmac_set_rx_mode(struct net_device *dev)
3563 {
3564 struct stmmac_priv *priv = netdev_priv(dev);
3565
3566 stmmac_set_filter(priv, priv->hw, dev);
3567 }
3568
3569 /**
3570 * stmmac_change_mtu - entry point to change MTU size for the device.
3571 * @dev : device pointer.
3572 * @new_mtu : the new MTU size for the device.
3573 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3574 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3575 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3576 * Return value:
3577 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3578 * file on failure.
3579 */
3580 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3581 {
3582 struct stmmac_priv *priv = netdev_priv(dev);
3583 int txfifosz = priv->plat->tx_fifo_size;
3584
3585 if (txfifosz == 0)
3586 txfifosz = priv->dma_cap.tx_fifo_size;
3587
3588 txfifosz /= priv->plat->tx_queues_to_use;
3589
3590 if (netif_running(dev)) {
3591 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3592 return -EBUSY;
3593 }
3594
3595 new_mtu = STMMAC_ALIGN(new_mtu);
3596
3597 /* If condition true, FIFO is too small or MTU too large */
3598 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3599 return -EINVAL;
3600
3601 dev->mtu = new_mtu;
3602
3603 netdev_update_features(dev);
3604
3605 return 0;
3606 }
3607
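/**
 * stmmac_fix_features - fix up the requested netdev features
 * @dev : device pointer.
 * @features : features being requested.
 * Description: drop the RX/TX checksum offload flags that the HW or the
 * platform configuration cannot honour and latch the TSO state requested
 * via ethtool into priv->tso.
 */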
3608 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3609 netdev_features_t features)
3610 {
3611 struct stmmac_priv *priv = netdev_priv(dev);
3612
3613 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3614 features &= ~NETIF_F_RXCSUM;
3615
3616 if (!priv->plat->tx_coe)
3617 features &= ~NETIF_F_CSUM_MASK;
3618
3619 /* Some GMAC devices have buggy Jumbo frame support that
3620 * needs Tx COE to be disabled for oversized frames
3621 * (due to limited buffer sizes). In this case we disable
3622 * TX csum insertion in the TDES and do not use SF.
3623 */
3624 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3625 features &= ~NETIF_F_CSUM_MASK;
3626
3627 /* Disable tso if asked by ethtool */
3628 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3629 if (features & NETIF_F_TSO)
3630 priv->tso = true;
3631 else
3632 priv->tso = false;
3633 }
3634
3635 return features;
3636 }
3637
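/**
 * stmmac_set_features - entry point to apply new netdev features
 * @netdev : device pointer.
 * @features : features to be applied.
 * Description: update the RX checksum offload setting in priv->hw
 * according to the requested features and re-program the IPC block.
 */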
3638 static int stmmac_set_features(struct net_device *netdev,
3639 netdev_features_t features)
3640 {
3641 struct stmmac_priv *priv = netdev_priv(netdev);
3642
3643 /* Keep the COE Type if RX checksum offload is enabled */
3644 if (features & NETIF_F_RXCSUM)
3645 priv->hw->rx_csum = priv->plat->rx_coe;
3646 else
3647 priv->hw->rx_csum = 0;
3648 /* No check is needed because rx_coe has already been set and will
3649 * be fixed up in case of an issue.
3650 */
3651 stmmac_rx_ipc(priv, priv->hw);
3652
3653 return 0;
3654 }
3655
3656 /**
3657 * stmmac_interrupt - main ISR
3658 * @irq: interrupt number.
3659 * @dev_id: to pass the net device pointer (must be valid).
3660 * Description: this is the main driver interrupt service routine.
3661 * It can call:
3662 * o DMA service routine (to manage incoming frame reception and transmission
3663 * status)
3664 * o Core interrupts to manage: remote wake-up, management counter, LPI
3665 * interrupts.
3666 */
3667 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3668 {
3669 struct net_device *dev = (struct net_device *)dev_id;
3670 struct stmmac_priv *priv = netdev_priv(dev);
3671 u32 rx_cnt = priv->plat->rx_queues_to_use;
3672 u32 tx_cnt = priv->plat->tx_queues_to_use;
3673 u32 queues_count;
3674 u32 queue;
3675 bool xmac;
3676
3677 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3678 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3679
3680 if (priv->irq_wake)
3681 pm_wakeup_event(priv->device, 0);
3682
3683 /* Check if adapter is up */
3684 if (test_bit(STMMAC_DOWN, &priv->state))
3685 return IRQ_HANDLED;
3686 /* Check if a fatal error happened */
3687 if (stmmac_safety_feat_interrupt(priv))
3688 return IRQ_HANDLED;
3689
3690 /* To handle GMAC own interrupts */
3691 if ((priv->plat->has_gmac) || xmac) {
3692 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3693 int mtl_status;
3694
3695 if (unlikely(status)) {
3696 /* For LPI we need to save the tx status */
3697 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3698 priv->tx_path_in_lpi_mode = true;
3699 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3700 priv->tx_path_in_lpi_mode = false;
3701 }
3702
3703 for (queue = 0; queue < queues_count; queue++) {
3704 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3705
3706 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3707 queue);
3708 if (mtl_status != -EINVAL)
3709 status |= mtl_status;
3710
3711 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3712 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3713 rx_q->rx_tail_addr,
3714 queue);
3715 }
3716
3717 /* PCS link status */
3718 if (priv->hw->pcs) {
3719 if (priv->xstats.pcs_link)
3720 netif_carrier_on(dev);
3721 else
3722 netif_carrier_off(dev);
3723 }
3724 }
3725
3726 /* To handle DMA interrupts */
3727 stmmac_dma_interrupt(priv);
3728
3729 return IRQ_HANDLED;
3730 }
3731
3732 #ifdef CONFIG_NET_POLL_CONTROLLER
3733 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3734 * to allow network I/O with interrupts disabled.
3735 */
3736 static void stmmac_poll_controller(struct net_device *dev)
3737 {
3738 disable_irq(dev->irq);
3739 stmmac_interrupt(dev->irq, dev);
3740 enable_irq(dev->irq);
3741 }
3742 #endif
3743
3744 /**
3745 * stmmac_ioctl - Entry point for the Ioctl
3746 * @dev: Device pointer.
3747 * @rq: An IOCTL-specific structure that can contain a pointer to
3748 * a proprietary structure used to pass information to the driver.
3749 * @cmd: IOCTL command
3750 * Description:
3751 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3752 */
3753 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3754 {
3755 int ret = -EOPNOTSUPP;
3756
3757 if (!netif_running(dev))
3758 return -EINVAL;
3759
3760 switch (cmd) {
3761 case SIOCGMIIPHY:
3762 case SIOCGMIIREG:
3763 case SIOCSMIIREG:
3764 if (!dev->phydev)
3765 return -EINVAL;
3766 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3767 break;
3768 case SIOCSHWTSTAMP:
3769 ret = stmmac_hwtstamp_ioctl(dev, rq);
3770 break;
3771 default:
3772 break;
3773 }
3774
3775 return ret;
3776 }
3777
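/* tc block callback: all queues are temporarily disabled while the
 * classifier is (un)installed in HW; only CLSU32 is offloaded here.
 */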
3778 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3779 void *cb_priv)
3780 {
3781 struct stmmac_priv *priv = cb_priv;
3782 int ret = -EOPNOTSUPP;
3783
3784 stmmac_disable_all_queues(priv);
3785
3786 switch (type) {
3787 case TC_SETUP_CLSU32:
3788 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3789 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3790 break;
3791 default:
3792 break;
3793 }
3794
3795 stmmac_enable_all_queues(priv);
3796 return ret;
3797 }
3798
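/* Bind/unbind the tc block callback; only the clsact ingress binder
 * type is supported for HW offload.
 */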
3799 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3800 struct tc_block_offload *f)
3801 {
3802 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3803 return -EOPNOTSUPP;
3804
3805 switch (f->command) {
3806 case TC_BLOCK_BIND:
3807 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3808 priv, priv, f->extack);
3809 case TC_BLOCK_UNBIND:
3810 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3811 return 0;
3812 default:
3813 return -EOPNOTSUPP;
3814 }
3815 }
3816
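/* ndo_setup_tc entry point: dispatch block binding and CBS qdisc
 * offload requests (e.g. "tc qdisc ... cbs ... offload 1") to the
 * corresponding helpers.
 */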
3817 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3818 void *type_data)
3819 {
3820 struct stmmac_priv *priv = netdev_priv(ndev);
3821
3822 switch (type) {
3823 case TC_SETUP_BLOCK:
3824 return stmmac_setup_tc_block(priv, type_data);
3825 case TC_SETUP_QDISC_CBS:
3826 return stmmac_tc_setup_cbs(priv, priv, type_data);
3827 default:
3828 return -EOPNOTSUPP;
3829 }
3830 }
3831
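/* Transmit queue selection: TSO frames are always sent on queue 0
 * (the only queue guaranteed to be TSO capable), everything else
 * uses the core fallback selection.
 */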
3832 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3833 struct net_device *sb_dev,
3834 select_queue_fallback_t fallback)
3835 {
3836 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3837 /*
3838 * There is no way to determine the number of TSO
3839 * capable queues. Always use Queue 0 because, if
3840 * TSO is supported, then at least this one will
3841 * be capable.
3842 */
3843 return 0;
3844 }
3845
3846 return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
3847 }
3848
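/* ndo_set_mac_address: validate and store the new address, then
 * program it into the MAC address register 0.
 */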
3849 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3850 {
3851 struct stmmac_priv *priv = netdev_priv(ndev);
3852 int ret = 0;
3853
3854 ret = eth_mac_addr(ndev, addr);
3855 if (ret)
3856 return ret;
3857
3858 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3859
3860 return ret;
3861 }
3862
3863 #ifdef CONFIG_DEBUG_FS
3864 static struct dentry *stmmac_fs_dir;
3865
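/* Dump a descriptor ring (basic or extended format) to a seq_file:
 * one line per descriptor with its physical address and DES0..DES3.
 */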
3866 static void sysfs_display_ring(void *head, int size, int extend_desc,
3867 struct seq_file *seq)
3868 {
3869 int i;
3870 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3871 struct dma_desc *p = (struct dma_desc *)head;
3872
3873 for (i = 0; i < size; i++) {
3874 if (extend_desc) {
3875 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3876 i, (unsigned int)virt_to_phys(ep),
3877 le32_to_cpu(ep->basic.des0),
3878 le32_to_cpu(ep->basic.des1),
3879 le32_to_cpu(ep->basic.des2),
3880 le32_to_cpu(ep->basic.des3));
3881 ep++;
3882 } else {
3883 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3884 i, (unsigned int)virt_to_phys(p),
3885 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3886 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3887 p++;
3888 }
3889 seq_printf(seq, "\n");
3890 }
3891 }
3892
3893 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3894 {
3895 struct net_device *dev = seq->private;
3896 struct stmmac_priv *priv = netdev_priv(dev);
3897 u32 rx_count = priv->plat->rx_queues_to_use;
3898 u32 tx_count = priv->plat->tx_queues_to_use;
3899 u32 queue;
3900
3901 if ((dev->flags & IFF_UP) == 0)
3902 return 0;
3903
3904 for (queue = 0; queue < rx_count; queue++) {
3905 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3906
3907 seq_printf(seq, "RX Queue %d:\n", queue);
3908
3909 if (priv->extend_desc) {
3910 seq_printf(seq, "Extended descriptor ring:\n");
3911 sysfs_display_ring((void *)rx_q->dma_erx,
3912 DMA_RX_SIZE, 1, seq);
3913 } else {
3914 seq_printf(seq, "Descriptor ring:\n");
3915 sysfs_display_ring((void *)rx_q->dma_rx,
3916 DMA_RX_SIZE, 0, seq);
3917 }
3918 }
3919
3920 for (queue = 0; queue < tx_count; queue++) {
3921 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3922
3923 seq_printf(seq, "TX Queue %d:\n", queue);
3924
3925 if (priv->extend_desc) {
3926 seq_printf(seq, "Extended descriptor ring:\n");
3927 sysfs_display_ring((void *)tx_q->dma_etx,
3928 DMA_TX_SIZE, 1, seq);
3929 } else {
3930 seq_printf(seq, "Descriptor ring:\n");
3931 sysfs_display_ring((void *)tx_q->dma_tx,
3932 DMA_TX_SIZE, 0, seq);
3933 }
3934 }
3935
3936 return 0;
3937 }
3938
3939 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3940 {
3941 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3942 }
3943
3944 /* Debugfs files; they appear under /sys/kernel/debug/stmmaceth/eth0 */
3945
3946 static const struct file_operations stmmac_rings_status_fops = {
3947 .owner = THIS_MODULE,
3948 .open = stmmac_sysfs_ring_open,
3949 .read = seq_read,
3950 .llseek = seq_lseek,
3951 .release = single_release,
3952 };
3953
3954 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3955 {
3956 struct net_device *dev = seq->private;
3957 struct stmmac_priv *priv = netdev_priv(dev);
3958
3959 if (!priv->hw_cap_support) {
3960 seq_printf(seq, "DMA HW features not supported\n");
3961 return 0;
3962 }
3963
3964 seq_printf(seq, "==============================\n");
3965 seq_printf(seq, "\tDMA HW features\n");
3966 seq_printf(seq, "==============================\n");
3967
3968 seq_printf(seq, "\t10/100 Mbps: %s\n",
3969 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3970 seq_printf(seq, "\t1000 Mbps: %s\n",
3971 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3972 seq_printf(seq, "\tHalf duplex: %s\n",
3973 (priv->dma_cap.half_duplex) ? "Y" : "N");
3974 seq_printf(seq, "\tHash Filter: %s\n",
3975 (priv->dma_cap.hash_filter) ? "Y" : "N");
3976 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3977 (priv->dma_cap.multi_addr) ? "Y" : "N");
3978 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3979 (priv->dma_cap.pcs) ? "Y" : "N");
3980 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3981 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3982 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3983 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3984 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3985 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3986 seq_printf(seq, "\tRMON module: %s\n",
3987 (priv->dma_cap.rmon) ? "Y" : "N");
3988 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3989 (priv->dma_cap.time_stamp) ? "Y" : "N");
3990 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3991 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3992 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3993 (priv->dma_cap.eee) ? "Y" : "N");
3994 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3995 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3996 (priv->dma_cap.tx_coe) ? "Y" : "N");
3997 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3998 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3999 (priv->dma_cap.rx_coe) ? "Y" : "N");
4000 } else {
4001 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4002 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4003 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4004 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4005 }
4006 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4007 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4008 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4009 priv->dma_cap.number_rx_channel);
4010 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4011 priv->dma_cap.number_tx_channel);
4012 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4013 (priv->dma_cap.enh_desc) ? "Y" : "N");
4014
4015 return 0;
4016 }
4017
4018 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4019 {
4020 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4021 }
4022
4023 static const struct file_operations stmmac_dma_cap_fops = {
4024 .owner = THIS_MODULE,
4025 .open = stmmac_sysfs_dma_cap_open,
4026 .read = seq_read,
4027 .llseek = seq_lseek,
4028 .release = single_release,
4029 };
4030
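/* Create the per-device debugfs entries ("descriptors_status" and
 * "dma_cap") under the driver's debugfs directory.
 */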
4031 static int stmmac_init_fs(struct net_device *dev)
4032 {
4033 struct stmmac_priv *priv = netdev_priv(dev);
4034
4035 /* Create per netdev entries */
4036 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4037
4038 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4039 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4040
4041 return -ENOMEM;
4042 }
4043
4044 /* Entry to report DMA RX/TX rings */
4045 priv->dbgfs_rings_status =
4046 debugfs_create_file("descriptors_status", 0444,
4047 priv->dbgfs_dir, dev,
4048 &stmmac_rings_status_fops);
4049
4050 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4051 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4052 debugfs_remove_recursive(priv->dbgfs_dir);
4053
4054 return -ENOMEM;
4055 }
4056
4057 /* Entry to report the DMA HW features */
4058 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4059 priv->dbgfs_dir,
4060 dev, &stmmac_dma_cap_fops);
4061
4062 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4063 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4064 debugfs_remove_recursive(priv->dbgfs_dir);
4065
4066 return -ENOMEM;
4067 }
4068
4069 return 0;
4070 }
4071
4072 static void stmmac_exit_fs(struct net_device *dev)
4073 {
4074 struct stmmac_priv *priv = netdev_priv(dev);
4075
4076 debugfs_remove_recursive(priv->dbgfs_dir);
4077 }
4078 #endif /* CONFIG_DEBUG_FS */
4079
4080 static const struct net_device_ops stmmac_netdev_ops = {
4081 .ndo_open = stmmac_open,
4082 .ndo_start_xmit = stmmac_xmit,
4083 .ndo_stop = stmmac_release,
4084 .ndo_change_mtu = stmmac_change_mtu,
4085 .ndo_fix_features = stmmac_fix_features,
4086 .ndo_set_features = stmmac_set_features,
4087 .ndo_set_rx_mode = stmmac_set_rx_mode,
4088 .ndo_tx_timeout = stmmac_tx_timeout,
4089 .ndo_do_ioctl = stmmac_ioctl,
4090 .ndo_setup_tc = stmmac_setup_tc,
4091 .ndo_select_queue = stmmac_select_queue,
4092 #ifdef CONFIG_NET_POLL_CONTROLLER
4093 .ndo_poll_controller = stmmac_poll_controller,
4094 #endif
4095 .ndo_set_mac_address = stmmac_set_mac_address,
4096 };
4097
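/* Reset subtask, run from the service workqueue: if a reset has been
 * requested and the interface is not already down, close and re-open
 * the device under rtnl_lock() to bring the HW back to a sane state.
 */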
4098 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4099 {
4100 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4101 return;
4102 if (test_bit(STMMAC_DOWN, &priv->state))
4103 return;
4104
4105 netdev_err(priv->dev, "Reset adapter.\n");
4106
4107 rtnl_lock();
4108 netif_trans_update(priv->dev);
4109 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4110 usleep_range(1000, 2000);
4111
4112 set_bit(STMMAC_DOWN, &priv->state);
4113 dev_close(priv->dev);
4114 dev_open(priv->dev);
4115 clear_bit(STMMAC_DOWN, &priv->state);
4116 clear_bit(STMMAC_RESETING, &priv->state);
4117 rtnl_unlock();
4118 }
4119
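/* Service task run from priv->wq; currently its only job is the reset
 * subtask, after which the SERVICE_SCHED flag is cleared.
 */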
4120 static void stmmac_service_task(struct work_struct *work)
4121 {
4122 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4123 service_task);
4124
4125 stmmac_reset_subtask(priv);
4126 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4127 }
4128
4129 /**
4130 * stmmac_hw_init - Init the MAC device
4131 * @priv: driver private structure
4132 * Description: this function is to configure the MAC device according to
4133 * some platform parameters or the HW capability register. It prepares the
4134 * driver to use either ring or chain modes and to setup either enhanced or
4135 * normal descriptors.
4136 */
4137 static int stmmac_hw_init(struct stmmac_priv *priv)
4138 {
4139 int ret;
4140
4141 /* dwmac-sun8i only works in chain mode */
4142 if (priv->plat->has_sun8i)
4143 chain_mode = 1;
4144 priv->chain_mode = chain_mode;
4145
4146 /* Initialize HW Interface */
4147 ret = stmmac_hwif_init(priv);
4148 if (ret)
4149 return ret;
4150
4151 /* Get the HW capability (GMAC cores newer than 3.50a) */
4152 priv->hw_cap_support = stmmac_get_hw_features(priv);
4153 if (priv->hw_cap_support) {
4154 dev_info(priv->device, "DMA HW capability register supported\n");
4155
4156 /* We can override some gmac/dma configuration fields: e.g.
4157 * enh_desc, tx_coe (e.g. that are passed through the
4158 * platform) with the values from the HW capability
4159 * register (if supported).
4160 */
4161 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4162 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4163 priv->hw->pmt = priv->plat->pmt;
4164
4165 /* TXCOE doesn't work in thresh DMA mode */
4166 if (priv->plat->force_thresh_dma_mode)
4167 priv->plat->tx_coe = 0;
4168 else
4169 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4170
4171 /* In case of GMAC4 rx_coe is from HW cap register. */
4172 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4173
4174 if (priv->dma_cap.rx_coe_type2)
4175 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4176 else if (priv->dma_cap.rx_coe_type1)
4177 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4178
4179 } else {
4180 dev_info(priv->device, "No HW DMA feature register supported\n");
4181 }
4182
4183 if (priv->plat->rx_coe) {
4184 priv->hw->rx_csum = priv->plat->rx_coe;
4185 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4186 if (priv->synopsys_id < DWMAC_CORE_4_00)
4187 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4188 }
4189 if (priv->plat->tx_coe)
4190 dev_info(priv->device, "TX Checksum insertion supported\n");
4191
4192 if (priv->plat->pmt) {
4193 dev_info(priv->device, "Wake-Up On Lan supported\n");
4194 device_set_wakeup_capable(priv->device, 1);
4195 }
4196
4197 if (priv->dma_cap.tsoen)
4198 dev_info(priv->device, "TSO supported\n");
4199
4200 /* Run HW quirks, if any */
4201 if (priv->hwif_quirks) {
4202 ret = priv->hwif_quirks(priv);
4203 if (ret)
4204 return ret;
4205 }
4206
4207 /* Rx Watchdog is available in cores newer than 3.40.
4208 * In some cases, for example on buggy HW, this feature
4209 * has to be disabled and this can be done by passing the
4210 * riwt_off field from the platform.
4211 */
4212 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4213 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4214 priv->use_riwt = 1;
4215 dev_info(priv->device,
4216 "Enable RX Mitigation via HW Watchdog Timer\n");
4217 }
4218
4219 return 0;
4220 }
4221
4222 /**
4223 * stmmac_dvr_probe
4224 * @device: device pointer
4225 * @plat_dat: platform data pointer
4226 * @res: stmmac resource pointer
4227 * Description: this is the main probe function, used to
4228 * call alloc_etherdev and allocate the private structure.
4229 * Return:
4230 * returns 0 on success, otherwise errno.
4231 */
4232 int stmmac_dvr_probe(struct device *device,
4233 struct plat_stmmacenet_data *plat_dat,
4234 struct stmmac_resources *res)
4235 {
4236 struct net_device *ndev = NULL;
4237 struct stmmac_priv *priv;
4238 u32 queue, maxq;
4239 int ret = 0;
4240
4241 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4242 MTL_MAX_TX_QUEUES,
4243 MTL_MAX_RX_QUEUES);
4244 if (!ndev)
4245 return -ENOMEM;
4246
4247 SET_NETDEV_DEV(ndev, device);
4248
4249 priv = netdev_priv(ndev);
4250 priv->device = device;
4251 priv->dev = ndev;
4252
4253 stmmac_set_ethtool_ops(ndev);
4254 priv->pause = pause;
4255 priv->plat = plat_dat;
4256 priv->ioaddr = res->addr;
4257 priv->dev->base_addr = (unsigned long)res->addr;
4258
4259 priv->dev->irq = res->irq;
4260 priv->wol_irq = res->wol_irq;
4261 priv->lpi_irq = res->lpi_irq;
4262
4263 if (res->mac)
4264 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4265
4266 dev_set_drvdata(device, priv->dev);
4267
4268 /* Verify driver arguments */
4269 stmmac_verify_args();
4270
4271 /* Allocate workqueue */
4272 priv->wq = create_singlethread_workqueue("stmmac_wq");
4273 if (!priv->wq) {
4274 dev_err(priv->device, "failed to create workqueue\n");
4275 ret = -ENOMEM;
4276 goto error_wq;
4277 }
4278
4279 INIT_WORK(&priv->service_task, stmmac_service_task);
4280
4281 /* Override with kernel parameters if supplied XXX CRS XXX
4282 * this needs to have multiple instances
4283 */
4284 if ((phyaddr >= 0) && (phyaddr <= 31))
4285 priv->plat->phy_addr = phyaddr;
4286
4287 if (priv->plat->stmmac_rst) {
4288 ret = reset_control_assert(priv->plat->stmmac_rst);
4289 reset_control_deassert(priv->plat->stmmac_rst);
4290 /* Some reset controllers have only a reset callback instead of
4291 * the assert + deassert callback pair.
4292 */
4293 if (ret == -ENOTSUPP)
4294 reset_control_reset(priv->plat->stmmac_rst);
4295 }
4296
4297 /* Init MAC and get the capabilities */
4298 ret = stmmac_hw_init(priv);
4299 if (ret)
4300 goto error_hw_init;
4301
4302 stmmac_check_ether_addr(priv);
4303
4304 /* Configure real RX and TX queues */
4305 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4306 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4307
4308 ndev->netdev_ops = &stmmac_netdev_ops;
4309
4310 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4311 NETIF_F_RXCSUM;
4312
4313 ret = stmmac_tc_init(priv, priv);
4314 if (!ret) {
4315 ndev->hw_features |= NETIF_F_HW_TC;
4316 }
4317
4318 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4319 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4320 priv->tso = true;
4321 dev_info(priv->device, "TSO feature enabled\n");
4322 }
4323 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4324 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4325 #ifdef STMMAC_VLAN_TAG_USED
4326 /* Both mac100 and gmac support receive VLAN tag detection */
4327 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4328 #endif
4329 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4330
4331 /* MTU range: 46 - hw-specific max */
4332 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4333 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4334 ndev->max_mtu = JUMBO_LEN;
4335 else if (priv->plat->has_xgmac)
4336 ndev->max_mtu = XGMAC_JUMBO_LEN;
4337 else
4338 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4339 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4340 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4341 */
4342 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4343 (priv->plat->maxmtu >= ndev->min_mtu))
4344 ndev->max_mtu = priv->plat->maxmtu;
4345 else if (priv->plat->maxmtu < ndev->min_mtu)
4346 dev_warn(priv->device,
4347 "%s: warning: maxmtu having invalid value (%d)\n",
4348 __func__, priv->plat->maxmtu);
4349
4350 if (flow_ctrl)
4351 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4352
4353 /* Setup channels NAPI */
4354 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4355
4356 for (queue = 0; queue < maxq; queue++) {
4357 struct stmmac_channel *ch = &priv->channel[queue];
4358
4359 ch->priv_data = priv;
4360 ch->index = queue;
4361
4362 if (queue < priv->plat->rx_queues_to_use)
4363 ch->has_rx = true;
4364 if (queue < priv->plat->tx_queues_to_use)
4365 ch->has_tx = true;
4366
4367 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4368 NAPI_POLL_WEIGHT);
4369 }
4370
4371 mutex_init(&priv->lock);
4372
4373 /* If a specific clk_csr value is passed from the platform
4374 * this means that the CSR Clock Range selection cannot be
4375 * changed at run-time and it is fixed. Otherwise the driver will
4376 * try to set the MDC clock dynamically according to the actual
4377 * csr clock input.
4378 */
4379 if (!priv->plat->clk_csr)
4380 stmmac_clk_csr_set(priv);
4381 else
4382 priv->clk_csr = priv->plat->clk_csr;
4383
4384 stmmac_check_pcs_mode(priv);
4385
4386 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4387 priv->hw->pcs != STMMAC_PCS_TBI &&
4388 priv->hw->pcs != STMMAC_PCS_RTBI) {
4389 /* MDIO bus Registration */
4390 ret = stmmac_mdio_register(ndev);
4391 if (ret < 0) {
4392 dev_err(priv->device,
4393 "%s: MDIO bus (id: %d) registration failed",
4394 __func__, priv->plat->bus_id);
4395 goto error_mdio_register;
4396 }
4397 }
4398
4399 ret = register_netdev(ndev);
4400 if (ret) {
4401 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4402 __func__, ret);
4403 goto error_netdev_register;
4404 }
4405
4406 #ifdef CONFIG_DEBUG_FS
4407 ret = stmmac_init_fs(ndev);
4408 if (ret < 0)
4409 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4410 __func__);
4411 #endif
4412
4413 return ret;
4414
4415 error_netdev_register:
4416 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4417 priv->hw->pcs != STMMAC_PCS_TBI &&
4418 priv->hw->pcs != STMMAC_PCS_RTBI)
4419 stmmac_mdio_unregister(ndev);
4420 error_mdio_register:
4421 for (queue = 0; queue < maxq; queue++) {
4422 struct stmmac_channel *ch = &priv->channel[queue];
4423
4424 netif_napi_del(&ch->napi);
4425 }
4426 error_hw_init:
4427 destroy_workqueue(priv->wq);
4428 error_wq:
4429 free_netdev(ndev);
4430
4431 return ret;
4432 }
4433 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4434
4435 /**
4436 * stmmac_dvr_remove
4437 * @dev: device pointer
4438 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4439 * changes the link status and releases the DMA descriptor rings.
4440 */
4441 int stmmac_dvr_remove(struct device *dev)
4442 {
4443 struct net_device *ndev = dev_get_drvdata(dev);
4444 struct stmmac_priv *priv = netdev_priv(ndev);
4445
4446 netdev_info(priv->dev, "%s: removing driver", __func__);
4447
4448 #ifdef CONFIG_DEBUG_FS
4449 stmmac_exit_fs(ndev);
4450 #endif
4451 stmmac_stop_all_dma(priv);
4452
4453 stmmac_mac_set(priv, priv->ioaddr, false);
4454 netif_carrier_off(ndev);
4455 unregister_netdev(ndev);
4456 if (priv->plat->stmmac_rst)
4457 reset_control_assert(priv->plat->stmmac_rst);
4458 clk_disable_unprepare(priv->plat->pclk);
4459 clk_disable_unprepare(priv->plat->stmmac_clk);
4460 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4461 priv->hw->pcs != STMMAC_PCS_TBI &&
4462 priv->hw->pcs != STMMAC_PCS_RTBI)
4463 stmmac_mdio_unregister(ndev);
4464 destroy_workqueue(priv->wq);
4465 mutex_destroy(&priv->lock);
4466 free_netdev(ndev);
4467
4468 return 0;
4469 }
4470 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4471
4472 /**
4473 * stmmac_suspend - suspend callback
4474 * @dev: device pointer
4475 * Description: this function suspends the device; it is called
4476 * by the platform driver to stop the network queue, release the resources,
4477 * program the PMT register (for WoL) and clean/release the driver resources.
4478 */
4479 int stmmac_suspend(struct device *dev)
4480 {
4481 struct net_device *ndev = dev_get_drvdata(dev);
4482 struct stmmac_priv *priv = netdev_priv(ndev);
4483 u32 chan;
4484
4485 if (!ndev || !netif_running(ndev))
4486 return 0;
4487
4488 if (ndev->phydev)
4489 phy_stop(ndev->phydev);
4490
4491 mutex_lock(&priv->lock);
4492
4493 netif_device_detach(ndev);
4494
4495 stmmac_disable_all_queues(priv);
4496
4497 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4498 del_timer_sync(&priv->tx_queue[chan].txtimer);
4499
4500 /* Stop TX/RX DMA */
4501 stmmac_stop_all_dma(priv);
4502
4503 /* Enable Power down mode by programming the PMT regs */
4504 if (device_may_wakeup(priv->device)) {
4505 stmmac_pmt(priv, priv->hw, priv->wolopts);
4506 priv->irq_wake = 1;
4507 } else {
4508 stmmac_mac_set(priv, priv->ioaddr, false);
4509 pinctrl_pm_select_sleep_state(priv->device);
4510 /* Disable clocks in case PWM is off */
4511 if (priv->plat->clk_ptp_ref)
4512 clk_disable_unprepare(priv->plat->clk_ptp_ref);
4513 clk_disable_unprepare(priv->plat->pclk);
4514 clk_disable_unprepare(priv->plat->stmmac_clk);
4515 }
4516 mutex_unlock(&priv->lock);
4517
4518 priv->oldlink = false;
4519 priv->speed = SPEED_UNKNOWN;
4520 priv->oldduplex = DUPLEX_UNKNOWN;
4521 return 0;
4522 }
4523 EXPORT_SYMBOL_GPL(stmmac_suspend);
4524
4525 /**
4526 * stmmac_reset_queues_param - reset queue parameters
4527 * @priv: driver private structure
4528 */
4529 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4530 {
4531 u32 rx_cnt = priv->plat->rx_queues_to_use;
4532 u32 tx_cnt = priv->plat->tx_queues_to_use;
4533 u32 queue;
4534
4535 for (queue = 0; queue < rx_cnt; queue++) {
4536 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4537
4538 rx_q->cur_rx = 0;
4539 rx_q->dirty_rx = 0;
4540 }
4541
4542 for (queue = 0; queue < tx_cnt; queue++) {
4543 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4544
4545 tx_q->cur_tx = 0;
4546 tx_q->dirty_tx = 0;
4547 tx_q->mss = 0;
4548 }
4549 }
4550
4551 /**
4552 * stmmac_resume - resume callback
4553 * @dev: device pointer
4554 * Description: on resume, this function is invoked to set up the DMA and CORE
4555 * in a usable state.
4556 */
4557 int stmmac_resume(struct device *dev)
4558 {
4559 struct net_device *ndev = dev_get_drvdata(dev);
4560 struct stmmac_priv *priv = netdev_priv(ndev);
4561
4562 if (!netif_running(ndev))
4563 return 0;
4564
4565 /* The Power Down bit in the PM register is cleared
4566 * automatically as soon as a magic packet or a Wake-up frame
4567 * is received. Anyway, it's better to manually clear
4568 * this bit because it can generate problems while resuming
4569 * from other devices (e.g. serial console).
4570 */
4571 if (device_may_wakeup(priv->device)) {
4572 mutex_lock(&priv->lock);
4573 stmmac_pmt(priv, priv->hw, 0);
4574 mutex_unlock(&priv->lock);
4575 priv->irq_wake = 0;
4576 } else {
4577 pinctrl_pm_select_default_state(priv->device);
4578 /* enable the clk previously disabled */
4579 clk_prepare_enable(priv->plat->stmmac_clk);
4580 clk_prepare_enable(priv->plat->pclk);
4581 if (priv->plat->clk_ptp_ref)
4582 clk_prepare_enable(priv->plat->clk_ptp_ref);
4583 /* reset the phy so that it's ready */
4584 if (priv->mii)
4585 stmmac_mdio_reset(priv->mii);
4586 }
4587
4588 netif_device_attach(ndev);
4589
4590 mutex_lock(&priv->lock);
4591
4592 stmmac_reset_queues_param(priv);
4593
4594 stmmac_clear_descriptors(priv);
4595
4596 stmmac_hw_setup(ndev, false);
4597 stmmac_init_tx_coalesce(priv);
4598 stmmac_set_rx_mode(ndev);
4599
4600 stmmac_enable_all_queues(priv);
4601
4602 mutex_unlock(&priv->lock);
4603
4604 if (ndev->phydev)
4605 phy_start(ndev->phydev);
4606
4607 return 0;
4608 }
4609 EXPORT_SYMBOL_GPL(stmmac_resume);
4610
4611 #ifndef MODULE
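/* Parse the built-in "stmmaceth=" kernel command line. Options mirror
 * the module parameters and are comma separated "opt:value" pairs,
 * e.g. stmmaceth=debug:16,eee_timer:500 (values here are only examples).
 */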
4612 static int __init stmmac_cmdline_opt(char *str)
4613 {
4614 char *opt;
4615
4616 if (!str || !*str)
4617 return -EINVAL;
4618 while ((opt = strsep(&str, ",")) != NULL) {
4619 if (!strncmp(opt, "debug:", 6)) {
4620 if (kstrtoint(opt + 6, 0, &debug))
4621 goto err;
4622 } else if (!strncmp(opt, "phyaddr:", 8)) {
4623 if (kstrtoint(opt + 8, 0, &phyaddr))
4624 goto err;
4625 } else if (!strncmp(opt, "buf_sz:", 7)) {
4626 if (kstrtoint(opt + 7, 0, &buf_sz))
4627 goto err;
4628 } else if (!strncmp(opt, "tc:", 3)) {
4629 if (kstrtoint(opt + 3, 0, &tc))
4630 goto err;
4631 } else if (!strncmp(opt, "watchdog:", 9)) {
4632 if (kstrtoint(opt + 9, 0, &watchdog))
4633 goto err;
4634 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4635 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4636 goto err;
4637 } else if (!strncmp(opt, "pause:", 6)) {
4638 if (kstrtoint(opt + 6, 0, &pause))
4639 goto err;
4640 } else if (!strncmp(opt, "eee_timer:", 10)) {
4641 if (kstrtoint(opt + 10, 0, &eee_timer))
4642 goto err;
4643 } else if (!strncmp(opt, "chain_mode:", 11)) {
4644 if (kstrtoint(opt + 11, 0, &chain_mode))
4645 goto err;
4646 }
4647 }
4648 return 0;
4649
4650 err:
4651 pr_err("%s: ERROR broken module parameter conversion", __func__);
4652 return -EINVAL;
4653 }
4654
4655 __setup("stmmaceth=", stmmac_cmdline_opt);
4656 #endif /* MODULE */
4657
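/* Module init: only set up the common debugfs root directory here;
 * the devices themselves are registered later via stmmac_dvr_probe()
 * from the platform/PCI glue drivers.
 */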
4658 static int __init stmmac_init(void)
4659 {
4660 #ifdef CONFIG_DEBUG_FS
4661 /* Create debugfs main directory if it doesn't exist yet */
4662 if (!stmmac_fs_dir) {
4663 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4664
4665 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4666 pr_err("ERROR %s, debugfs create directory failed\n",
4667 STMMAC_RESOURCE_NAME);
4668
4669 return -ENOMEM;
4670 }
4671 }
4672 #endif
4673
4674 return 0;
4675 }
4676
4677 static void __exit stmmac_exit(void)
4678 {
4679 #ifdef CONFIG_DEBUG_FS
4680 debugfs_remove_recursive(stmmac_fs_dir);
4681 #endif
4682 }
4683
4684 module_init(stmmac_init)
4685 module_exit(stmmac_exit)
4686
4687 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4688 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4689 MODULE_LICENSE("GPL");
4690