1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
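/* Worked example (assuming a hypothetical SMP_CACHE_BYTES of 64):
 * STMMAC_ALIGN(1500) first rounds 1500 up to the cache line, giving 1536,
 * then rounds that up to a multiple of 16, which is still 1536.
 */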
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force use of chain mode instead of ring mode
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
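/* Usage sketch (hypothetical, assuming the usual module name "stmmac"):
 * chain mode can be forced at load time with "modprobe stmmac chain_mode=1",
 * or, when the driver is built-in, from the kernel command line with
 * "stmmac.chain_mode=1".
 */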
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
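/* Minimal usage sketch (illustrative only, not a verbatim call site):
 * suspend/resume or runtime-PM paths are expected to bracket register
 * access with the bus clocks, e.g.:
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	... access MAC/DMA registers ...
 *	stmmac_bus_clks_config(priv, false);
 */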
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* The platform-provided default clk_csr is assumed valid
308 * for all cases except the ones mentioned below.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider, as the frequency of
311 * clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
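/* Example (hypothetical rate): with clk_rate = 75 MHz the range checks below
 * select STMMAC_CSR_60_100M, i.e. the MDC divider for a 60-100 MHz CSR clock.
 */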
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
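/* Example (hypothetical values): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5, cur_tx is ahead of dirty_tx, so
 * avail = 512 - 10 + 5 - 1 = 506 free descriptors (one slot is always
 * kept unused to distinguish a full ring from an empty one).
 */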
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter LPI mode
405 * @priv: driver private structure
406 * Description: this function verifies that all TX queues have finished their
407 * work and, if so, enters LPI mode when EEE is enabled.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have the work finished */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the LPI state
433 * is true. It is called from the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and if we are not in LPI state,
452 * then the MAC transmitter can be moved to the LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* When using the PCS we cannot deal with the PHY registers at this stage,
475 * so we do not support extra features like EEE.
476 */
477 if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 priv->hw->pcs == STMMAC_PCS_RTBI)
479 return false;
480
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv->dma_cap.eee)
483 return false;
484
485 mutex_lock(&priv->lock);
486
487 /* Check if it needs to be deactivated */
488 if (!priv->eee_active) {
489 if (priv->eee_enabled) {
490 netdev_dbg(priv->dev, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv, 0);
492 del_timer_sync(&priv->eee_ctrl_timer);
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 if (priv->hw->xpcs)
495 xpcs_config_eee(priv->hw->xpcs,
496 priv->plat->mult_fact_100ns,
497 false);
498 }
499 mutex_unlock(&priv->lock);
500 return false;
501 }
502
503 if (priv->eee_active && !priv->eee_enabled) {
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 eee_tw_timer);
507 if (priv->hw->xpcs)
508 xpcs_config_eee(priv->hw->xpcs,
509 priv->plat->mult_fact_100ns,
510 true);
511 }
512
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 del_timer_sync(&priv->eee_ctrl_timer);
515 priv->tx_path_in_lpi_mode = false;
516 stmmac_lpi_entry_timer_config(priv, 1);
517 } else {
518 stmmac_lpi_entry_timer_config(priv, 0);
519 mod_timer(&priv->eee_ctrl_timer,
520 STMMAC_LPI_T(priv->tx_lpi_timer));
521 }
522
523 mutex_unlock(&priv->lock);
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
532 * Description :
533 * This function reads the timestamp from the descriptor and passes it to
534 * the stack, also performing some sanity checks.
535 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 struct dma_desc *p, struct sk_buff *skb)
538 {
539 struct skb_shared_hwtstamps shhwtstamp;
540 bool found = false;
541 u64 ns = 0;
542
543 if (!priv->hwts_tx_en)
544 return;
545
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 return;
549
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv, p)) {
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 found = true;
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 found = true;
556 }
557
558 if (found) {
559 ns -= priv->plat->cdc_error_adj;
560
561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb, &shhwtstamp);
567 }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
575 * Description :
576 * This function will read received packet's timestamp from the descriptor
577 * and pass it to the stack. It also performs some sanity checks.
578 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 struct dma_desc *np, struct sk_buff *skb)
581 {
582 struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 struct dma_desc *desc = p;
584 u64 ns = 0;
585
586 if (!priv->hwts_rx_en)
587 return;
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 desc = np;
591
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596 ns -= priv->plat->cdc_error_adj;
597
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 shhwtstamp = skb_hwtstamps(skb);
600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 } else {
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 }
605 }
606
607 /**
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL specific structure, that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
612 * Description:
613 * This function configures the MAC to enable/disable both outgoing (TX)
614 * and incoming (RX) packet timestamping based on user input.
615 * Return Value:
616 * 0 on success and an appropriate -ve integer on failure.
617 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 struct stmmac_priv *priv = netdev_priv(dev);
621 struct hwtstamp_config config;
622 u32 ptp_v2 = 0;
623 u32 tstamp_all = 0;
624 u32 ptp_over_ipv4_udp = 0;
625 u32 ptp_over_ipv6_udp = 0;
626 u32 ptp_over_ethernet = 0;
627 u32 snap_type_sel = 0;
628 u32 ts_master_en = 0;
629 u32 ts_event_en = 0;
630
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 netdev_alert(priv->dev, "No support for HW time stamping\n");
633 priv->hwts_tx_en = 0;
634 priv->hwts_rx_en = 0;
635
636 return -EOPNOTSUPP;
637 }
638
639 if (copy_from_user(&config, ifr->ifr_data,
640 sizeof(config)))
641 return -EFAULT;
642
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__, config.flags, config.tx_type, config.rx_filter);
645
646 if (config.tx_type != HWTSTAMP_TX_OFF &&
647 config.tx_type != HWTSTAMP_TX_ON)
648 return -ERANGE;
649
650 if (priv->adv_ts) {
651 switch (config.rx_filter) {
652 case HWTSTAMP_FILTER_NONE:
653 /* do not time stamp any incoming packet */
654 config.rx_filter = HWTSTAMP_FILTER_NONE;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 /* PTP v1, UDP, any kind of event packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
664 * timestamping
665 */
666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 break;
670
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 /* PTP v1, UDP, Sync packet */
673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 /* take time stamp for SYNC messages only */
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
680
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 /* PTP v1, UDP, Delay_req packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en = PTP_TCR_TSMSTRENA;
686 ts_event_en = PTP_TCR_TSEVNTENA;
687
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 break;
691
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 /* PTP v2, UDP, any kind of event packet */
694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 ptp_v2 = PTP_TCR_TSVER2ENA;
696 /* take time stamp for all event messages */
697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 break;
702
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 /* PTP v2, UDP, Sync packet */
705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 ptp_v2 = PTP_TCR_TSVER2ENA;
707 /* take time stamp for SYNC messages only */
708 ts_event_en = PTP_TCR_TSEVNTENA;
709
710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 /* PTP v2, UDP, Delay_req packet */
716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 break;
725
726 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 /* PTP v2/802.AS1 any layer, any kind of event packet */
728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 ptp_v2 = PTP_TCR_TSVER2ENA;
730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 if (priv->synopsys_id < DWMAC_CORE_4_10)
732 ts_event_en = PTP_TCR_TSEVNTENA;
733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 ptp_over_ethernet = PTP_TCR_TSIPENA;
736 break;
737
738 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 /* PTP v2/802.AS1, any layer, Sync packet */
740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 ptp_v2 = PTP_TCR_TSVER2ENA;
742 /* take time stamp for SYNC messages only */
743 ts_event_en = PTP_TCR_TSEVNTENA;
744
745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 ptp_over_ethernet = PTP_TCR_TSIPENA;
748 break;
749
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 /* PTP v2/802.AS1, any layer, Delay_req packet */
752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 ptp_v2 = PTP_TCR_TSVER2ENA;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en = PTP_TCR_TSMSTRENA;
756 ts_event_en = PTP_TCR_TSEVNTENA;
757
758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 ptp_over_ethernet = PTP_TCR_TSIPENA;
761 break;
762
763 case HWTSTAMP_FILTER_NTP_ALL:
764 case HWTSTAMP_FILTER_ALL:
765 /* time stamp any incoming packet */
766 config.rx_filter = HWTSTAMP_FILTER_ALL;
767 tstamp_all = PTP_TCR_TSENALL;
768 break;
769
770 default:
771 return -ERANGE;
772 }
773 } else {
774 switch (config.rx_filter) {
775 case HWTSTAMP_FILTER_NONE:
776 config.rx_filter = HWTSTAMP_FILTER_NONE;
777 break;
778 default:
779 /* PTP v1, UDP, any kind of event packet */
780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 break;
782 }
783 }
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787 priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789 if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 priv->systime_flags |= tstamp_all | ptp_v2 |
791 ptp_over_ethernet | ptp_over_ipv6_udp |
792 ptp_over_ipv4_udp | ts_event_en |
793 ts_master_en | snap_type_sel;
794 }
795
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798 memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800 return copy_to_user(ifr->ifr_data, &config,
801 sizeof(config)) ? -EFAULT : 0;
802 }
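/* Userspace sketch (not part of this driver, shown for illustration): the
 * function above is reached through the standard SIOCSHWTSTAMP ioctl, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * "eth0" and sock_fd are placeholders; any AF_INET datagram socket works.
 */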
803
804 /**
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL specific structure, that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
809 * Description:
810 * This function obtains the current hardware timestamping settings
811 * as requested.
812 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct hwtstamp_config *config = &priv->tstamp_config;
817
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 return -EOPNOTSUPP;
820
821 return copy_to_user(ifr->ifr_data, config,
822 sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
829 * Description:
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * It will be rerun after resuming from suspend, in which case the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 struct timespec64 now;
839 u32 sec_inc = 0;
840 u64 temp = 0;
841
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 return -EOPNOTSUPP;
844
845 if (!priv->plat->clk_ptp_rate) {
846 netdev_err(priv->dev, "Invalid PTP clock rate");
847 return -EINVAL;
848 }
849
850 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
851 priv->systime_flags = systime_flags;
852
853 /* program Sub Second Increment reg */
854 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
855 priv->plat->clk_ptp_rate,
856 xmac, &sec_inc);
857 temp = div_u64(1000000000ULL, sec_inc);
858
859 /* Store sub second increment for later use */
860 priv->sub_second_inc = sec_inc;
861
862 /* Calculate the default addend value:
863 * the formula is:
864 * addend = (2^32 * (1e9 / sec_inc)) / clk_ptp_rate
865 * i.e. 2^32 / freq_div_ratio, where freq_div_ratio = clk_ptp_rate * sec_inc / 1e9
866 */
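/* Worked example (hypothetical clock): with clk_ptp_rate = 125 MHz and
 * sec_inc = 16 ns, temp = 1e9 / 16 = 62,500,000 and
 * addend = (62,500,000 << 32) / 125,000,000 = 0x80000000, i.e. the
 * accumulator overflows (and the counter advances by sec_inc) every
 * two PTP clock cycles.
 */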
867 temp = (u64)(temp << 32);
868 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
869 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
870
871 /* initialize system time */
872 ktime_get_real_ts64(&now);
873
874 /* lower 32 bits of tv_sec are safe until y2106 */
875 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
876
877 return 0;
878 }
879 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
880
881 /**
882 * stmmac_init_ptp - init PTP
883 * @priv: driver private structure
884 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
885 * This is done by looking at the HW cap. register.
886 * This function also registers the ptp driver.
887 */
888 static int stmmac_init_ptp(struct stmmac_priv *priv)
889 {
890 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
891 int ret;
892
893 if (priv->plat->ptp_clk_freq_config)
894 priv->plat->ptp_clk_freq_config(priv);
895
896 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
897 if (ret)
898 return ret;
899
900 priv->adv_ts = 0;
901 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
902 if (xmac && priv->dma_cap.atime_stamp)
903 priv->adv_ts = 1;
904 /* Dwmac 3.x core with extend_desc can support adv_ts */
905 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
906 priv->adv_ts = 1;
907
908 if (priv->dma_cap.time_stamp)
909 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
910
911 if (priv->adv_ts)
912 netdev_info(priv->dev,
913 "IEEE 1588-2008 Advanced Timestamp supported\n");
914
915 priv->hwts_tx_en = 0;
916 priv->hwts_rx_en = 0;
917
918 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
919 stmmac_hwtstamp_correct_latency(priv, priv);
920
921 return 0;
922 }
923
924 static void stmmac_release_ptp(struct stmmac_priv *priv)
925 {
926 clk_disable_unprepare(priv->plat->clk_ptp_ref);
927 stmmac_ptp_unregister(priv);
928 }
929
930 /**
931 * stmmac_mac_flow_ctrl - Configure flow control in all queues
932 * @priv: driver private structure
933 * @duplex: duplex passed to the next function
934 * Description: It is used for configuring the flow control in all queues
935 */
936 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
937 {
938 u32 tx_cnt = priv->plat->tx_queues_to_use;
939
940 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
941 priv->pause, tx_cnt);
942 }
943
944 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
945 phy_interface_t interface)
946 {
947 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
948
949 if (priv->hw->xpcs)
950 return &priv->hw->xpcs->pcs;
951
952 if (priv->hw->lynx_pcs)
953 return priv->hw->lynx_pcs;
954
955 return NULL;
956 }
957
958 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
959 const struct phylink_link_state *state)
960 {
961 /* Nothing to do, xpcs_config() handles everything */
962 }
963
964 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
965 {
966 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
967 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
968 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
969 bool *hs_enable = &fpe_cfg->hs_enable;
970
971 if (is_up && *hs_enable) {
972 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
973 MPACKET_VERIFY);
974 } else {
975 *lo_state = FPE_STATE_OFF;
976 *lp_state = FPE_STATE_OFF;
977 }
978 }
979
980 static void stmmac_mac_link_down(struct phylink_config *config,
981 unsigned int mode, phy_interface_t interface)
982 {
983 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
984
985 stmmac_mac_set(priv, priv->ioaddr, false);
986 priv->eee_active = false;
987 priv->tx_lpi_enabled = false;
988 priv->eee_enabled = stmmac_eee_init(priv);
989 stmmac_set_eee_pls(priv, priv->hw, false);
990
991 if (priv->dma_cap.fpesel)
992 stmmac_fpe_link_state_handle(priv, false);
993 }
994
995 static void stmmac_mac_link_up(struct phylink_config *config,
996 struct phy_device *phy,
997 unsigned int mode, phy_interface_t interface,
998 int speed, int duplex,
999 bool tx_pause, bool rx_pause)
1000 {
1001 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1002 u32 old_ctrl, ctrl;
1003
1004 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1005 priv->plat->serdes_powerup)
1006 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1007
1008 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1009 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1010
1011 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1012 switch (speed) {
1013 case SPEED_10000:
1014 ctrl |= priv->hw->link.xgmii.speed10000;
1015 break;
1016 case SPEED_5000:
1017 ctrl |= priv->hw->link.xgmii.speed5000;
1018 break;
1019 case SPEED_2500:
1020 ctrl |= priv->hw->link.xgmii.speed2500;
1021 break;
1022 default:
1023 return;
1024 }
1025 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1026 switch (speed) {
1027 case SPEED_100000:
1028 ctrl |= priv->hw->link.xlgmii.speed100000;
1029 break;
1030 case SPEED_50000:
1031 ctrl |= priv->hw->link.xlgmii.speed50000;
1032 break;
1033 case SPEED_40000:
1034 ctrl |= priv->hw->link.xlgmii.speed40000;
1035 break;
1036 case SPEED_25000:
1037 ctrl |= priv->hw->link.xlgmii.speed25000;
1038 break;
1039 case SPEED_10000:
1040 ctrl |= priv->hw->link.xgmii.speed10000;
1041 break;
1042 case SPEED_2500:
1043 ctrl |= priv->hw->link.speed2500;
1044 break;
1045 case SPEED_1000:
1046 ctrl |= priv->hw->link.speed1000;
1047 break;
1048 default:
1049 return;
1050 }
1051 } else {
1052 switch (speed) {
1053 case SPEED_2500:
1054 ctrl |= priv->hw->link.speed2500;
1055 break;
1056 case SPEED_1000:
1057 ctrl |= priv->hw->link.speed1000;
1058 break;
1059 case SPEED_100:
1060 ctrl |= priv->hw->link.speed100;
1061 break;
1062 case SPEED_10:
1063 ctrl |= priv->hw->link.speed10;
1064 break;
1065 default:
1066 return;
1067 }
1068 }
1069
1070 priv->speed = speed;
1071
1072 if (priv->plat->fix_mac_speed)
1073 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1074
1075 if (!duplex)
1076 ctrl &= ~priv->hw->link.duplex;
1077 else
1078 ctrl |= priv->hw->link.duplex;
1079
1080 /* Flow Control operation */
1081 if (rx_pause && tx_pause)
1082 priv->flow_ctrl = FLOW_AUTO;
1083 else if (rx_pause && !tx_pause)
1084 priv->flow_ctrl = FLOW_RX;
1085 else if (!rx_pause && tx_pause)
1086 priv->flow_ctrl = FLOW_TX;
1087 else
1088 priv->flow_ctrl = FLOW_OFF;
1089
1090 stmmac_mac_flow_ctrl(priv, duplex);
1091
1092 if (ctrl != old_ctrl)
1093 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1094
1095 stmmac_mac_set(priv, priv->ioaddr, true);
1096 if (phy && priv->dma_cap.eee) {
1097 priv->eee_active =
1098 phy_init_eee(phy, !(priv->plat->flags &
1099 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1100 priv->eee_enabled = stmmac_eee_init(priv);
1101 priv->tx_lpi_enabled = priv->eee_enabled;
1102 stmmac_set_eee_pls(priv, priv->hw, true);
1103 }
1104
1105 if (priv->dma_cap.fpesel)
1106 stmmac_fpe_link_state_handle(priv, true);
1107
1108 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1109 stmmac_hwtstamp_correct_latency(priv, priv);
1110 }
1111
1112 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1113 .mac_select_pcs = stmmac_mac_select_pcs,
1114 .mac_config = stmmac_mac_config,
1115 .mac_link_down = stmmac_mac_link_down,
1116 .mac_link_up = stmmac_mac_link_up,
1117 };
1118
1119 /**
1120 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1121 * @priv: driver private structure
1122 * Description: this is to verify if the HW supports the PCS.
1123 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1124 * configured for the TBI, RTBI, or SGMII PHY interface.
1125 */
1126 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1127 {
1128 int interface = priv->plat->mac_interface;
1129
1130 if (priv->dma_cap.pcs) {
1131 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1132 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1133 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1134 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1135 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1136 priv->hw->pcs = STMMAC_PCS_RGMII;
1137 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1138 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1139 priv->hw->pcs = STMMAC_PCS_SGMII;
1140 }
1141 }
1142 }
1143
1144 /**
1145 * stmmac_init_phy - PHY initialization
1146 * @dev: net device structure
1147 * Description: it initializes the driver's PHY state, and attaches the PHY
1148 * to the mac driver.
1149 * Return value:
1150 * 0 on success
1151 */
1152 static int stmmac_init_phy(struct net_device *dev)
1153 {
1154 struct stmmac_priv *priv = netdev_priv(dev);
1155 struct fwnode_handle *phy_fwnode;
1156 struct fwnode_handle *fwnode;
1157 int ret;
1158
1159 if (!phylink_expects_phy(priv->phylink))
1160 return 0;
1161
1162 fwnode = priv->plat->port_node;
1163 if (!fwnode)
1164 fwnode = dev_fwnode(priv->device);
1165
1166 if (fwnode)
1167 phy_fwnode = fwnode_get_phy_node(fwnode);
1168 else
1169 phy_fwnode = NULL;
1170
1171 /* Some DT bindings do not set up the PHY handle. Let's try to
1172 * manually parse it.
1173 */
1174 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1175 int addr = priv->plat->phy_addr;
1176 struct phy_device *phydev;
1177
1178 if (addr < 0) {
1179 netdev_err(priv->dev, "no phy found\n");
1180 return -ENODEV;
1181 }
1182
1183 phydev = mdiobus_get_phy(priv->mii, addr);
1184 if (!phydev) {
1185 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1186 return -ENODEV;
1187 }
1188
1189 ret = phylink_connect_phy(priv->phylink, phydev);
1190 } else {
1191 fwnode_handle_put(phy_fwnode);
1192 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1193 }
1194
1195 if (!priv->plat->pmt) {
1196 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1197
1198 phylink_ethtool_get_wol(priv->phylink, &wol);
1199 device_set_wakeup_capable(priv->device, !!wol.supported);
1200 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1201 }
1202
1203 return ret;
1204 }
1205
1206 static int stmmac_phy_setup(struct stmmac_priv *priv)
1207 {
1208 struct stmmac_mdio_bus_data *mdio_bus_data;
1209 int mode = priv->plat->phy_interface;
1210 struct fwnode_handle *fwnode;
1211 struct phylink *phylink;
1212 int max_speed;
1213
1214 priv->phylink_config.dev = &priv->dev->dev;
1215 priv->phylink_config.type = PHYLINK_NETDEV;
1216 priv->phylink_config.mac_managed_pm = true;
1217
1218 mdio_bus_data = priv->plat->mdio_bus_data;
1219 if (mdio_bus_data)
1220 priv->phylink_config.ovr_an_inband =
1221 mdio_bus_data->xpcs_an_inband;
1222
1223 /* Set the platform/firmware specified interface mode. Note, phylink
1224 * deals with the PHY interface mode, not the MAC interface mode.
1225 */
1226 __set_bit(mode, priv->phylink_config.supported_interfaces);
1227
1228 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1229 if (priv->hw->xpcs)
1230 xpcs_get_interfaces(priv->hw->xpcs,
1231 priv->phylink_config.supported_interfaces);
1232
1233 /* Get the MAC specific capabilities */
1234 stmmac_mac_phylink_get_caps(priv);
1235
1236 priv->phylink_config.mac_capabilities = priv->hw->link.caps;
1237
1238 max_speed = priv->plat->max_speed;
1239 if (max_speed)
1240 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1241
1242 fwnode = priv->plat->port_node;
1243 if (!fwnode)
1244 fwnode = dev_fwnode(priv->device);
1245
1246 phylink = phylink_create(&priv->phylink_config, fwnode,
1247 mode, &stmmac_phylink_mac_ops);
1248 if (IS_ERR(phylink))
1249 return PTR_ERR(phylink);
1250
1251 priv->phylink = phylink;
1252 return 0;
1253 }
1254
1255 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1256 struct stmmac_dma_conf *dma_conf)
1257 {
1258 u32 rx_cnt = priv->plat->rx_queues_to_use;
1259 unsigned int desc_size;
1260 void *head_rx;
1261 u32 queue;
1262
1263 /* Display RX rings */
1264 for (queue = 0; queue < rx_cnt; queue++) {
1265 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1266
1267 pr_info("\tRX Queue %u rings\n", queue);
1268
1269 if (priv->extend_desc) {
1270 head_rx = (void *)rx_q->dma_erx;
1271 desc_size = sizeof(struct dma_extended_desc);
1272 } else {
1273 head_rx = (void *)rx_q->dma_rx;
1274 desc_size = sizeof(struct dma_desc);
1275 }
1276
1277 /* Display RX ring */
1278 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1279 rx_q->dma_rx_phy, desc_size);
1280 }
1281 }
1282
1283 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1284 struct stmmac_dma_conf *dma_conf)
1285 {
1286 u32 tx_cnt = priv->plat->tx_queues_to_use;
1287 unsigned int desc_size;
1288 void *head_tx;
1289 u32 queue;
1290
1291 /* Display TX rings */
1292 for (queue = 0; queue < tx_cnt; queue++) {
1293 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1294
1295 pr_info("\tTX Queue %d rings\n", queue);
1296
1297 if (priv->extend_desc) {
1298 head_tx = (void *)tx_q->dma_etx;
1299 desc_size = sizeof(struct dma_extended_desc);
1300 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1301 head_tx = (void *)tx_q->dma_entx;
1302 desc_size = sizeof(struct dma_edesc);
1303 } else {
1304 head_tx = (void *)tx_q->dma_tx;
1305 desc_size = sizeof(struct dma_desc);
1306 }
1307
1308 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1309 tx_q->dma_tx_phy, desc_size);
1310 }
1311 }
1312
1313 static void stmmac_display_rings(struct stmmac_priv *priv,
1314 struct stmmac_dma_conf *dma_conf)
1315 {
1316 /* Display RX ring */
1317 stmmac_display_rx_rings(priv, dma_conf);
1318
1319 /* Display TX ring */
1320 stmmac_display_tx_rings(priv, dma_conf);
1321 }
1322
1323 static int stmmac_set_bfsize(int mtu, int bufsize)
1324 {
1325 int ret = bufsize;
1326
1327 if (mtu >= BUF_SIZE_8KiB)
1328 ret = BUF_SIZE_16KiB;
1329 else if (mtu >= BUF_SIZE_4KiB)
1330 ret = BUF_SIZE_8KiB;
1331 else if (mtu >= BUF_SIZE_2KiB)
1332 ret = BUF_SIZE_4KiB;
1333 else if (mtu > DEFAULT_BUFSIZE)
1334 ret = BUF_SIZE_2KiB;
1335 else
1336 ret = DEFAULT_BUFSIZE;
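/* Example: an MTU of 3000 is >= BUF_SIZE_2KiB but < BUF_SIZE_4KiB, so the
 * DMA buffer size returned above is bumped to BUF_SIZE_4KiB.
 */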
1337
1338 return ret;
1339 }
1340
1341 /**
1342 * stmmac_clear_rx_descriptors - clear RX descriptors
1343 * @priv: driver private structure
1344 * @dma_conf: structure to take the dma data
1345 * @queue: RX queue index
1346 * Description: this function is called to clear the RX descriptors
1347 * whether basic or extended descriptors are used.
1348 */
1349 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1350 struct stmmac_dma_conf *dma_conf,
1351 u32 queue)
1352 {
1353 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1354 int i;
1355
1356 /* Clear the RX descriptors */
1357 for (i = 0; i < dma_conf->dma_rx_size; i++)
1358 if (priv->extend_desc)
1359 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1360 priv->use_riwt, priv->mode,
1361 (i == dma_conf->dma_rx_size - 1),
1362 dma_conf->dma_buf_sz);
1363 else
1364 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1365 priv->use_riwt, priv->mode,
1366 (i == dma_conf->dma_rx_size - 1),
1367 dma_conf->dma_buf_sz);
1368 }
1369
1370 /**
1371 * stmmac_clear_tx_descriptors - clear tx descriptors
1372 * @priv: driver private structure
1373 * @dma_conf: structure to take the dma data
1374 * @queue: TX queue index.
1375 * Description: this function is called to clear the TX descriptors
1376 * whether basic or extended descriptors are used.
1377 */
1378 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1379 struct stmmac_dma_conf *dma_conf,
1380 u32 queue)
1381 {
1382 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1383 int i;
1384
1385 /* Clear the TX descriptors */
1386 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1387 int last = (i == (dma_conf->dma_tx_size - 1));
1388 struct dma_desc *p;
1389
1390 if (priv->extend_desc)
1391 p = &tx_q->dma_etx[i].basic;
1392 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1393 p = &tx_q->dma_entx[i].basic;
1394 else
1395 p = &tx_q->dma_tx[i];
1396
1397 stmmac_init_tx_desc(priv, p, priv->mode, last);
1398 }
1399 }
1400
1401 /**
1402 * stmmac_clear_descriptors - clear descriptors
1403 * @priv: driver private structure
1404 * @dma_conf: structure to take the dma data
1405 * Description: this function is called to clear the TX and RX descriptors
1406 * whether basic or extended descriptors are used.
1407 */
1408 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1409 struct stmmac_dma_conf *dma_conf)
1410 {
1411 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1412 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1413 u32 queue;
1414
1415 /* Clear the RX descriptors */
1416 for (queue = 0; queue < rx_queue_cnt; queue++)
1417 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1418
1419 /* Clear the TX descriptors */
1420 for (queue = 0; queue < tx_queue_cnt; queue++)
1421 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1422 }
1423
1424 /**
1425 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1426 * @priv: driver private structure
1427 * @dma_conf: structure to take the dma data
1428 * @p: descriptor pointer
1429 * @i: descriptor index
1430 * @flags: gfp flag
1431 * @queue: RX queue index
1432 * Description: this function is called to allocate a receive buffer, perform
1433 * the DMA mapping and init the descriptor.
1434 */
1435 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1436 struct stmmac_dma_conf *dma_conf,
1437 struct dma_desc *p,
1438 int i, gfp_t flags, u32 queue)
1439 {
1440 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1441 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1442 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1443
1444 if (priv->dma_cap.host_dma_width <= 32)
1445 gfp |= GFP_DMA32;
1446
1447 if (!buf->page) {
1448 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1449 if (!buf->page)
1450 return -ENOMEM;
1451 buf->page_offset = stmmac_rx_offset(priv);
1452 }
1453
1454 if (priv->sph && !buf->sec_page) {
1455 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1456 if (!buf->sec_page)
1457 return -ENOMEM;
1458
1459 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1460 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1461 } else {
1462 buf->sec_page = NULL;
1463 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1464 }
1465
1466 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1467
1468 stmmac_set_desc_addr(priv, p, buf->addr);
1469 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1470 stmmac_init_desc3(priv, p);
1471
1472 return 0;
1473 }
1474
1475 /**
1476 * stmmac_free_rx_buffer - free RX dma buffers
1477 * @priv: private structure
1478 * @rx_q: RX queue
1479 * @i: buffer index.
1480 */
1481 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1482 struct stmmac_rx_queue *rx_q,
1483 int i)
1484 {
1485 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1486
1487 if (buf->page)
1488 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1489 buf->page = NULL;
1490
1491 if (buf->sec_page)
1492 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1493 buf->sec_page = NULL;
1494 }
1495
1496 /**
1497 * stmmac_free_tx_buffer - free TX dma buffers
1498 * @priv: private structure
1499 * @dma_conf: structure to take the dma data
1500 * @queue: TX queue index
1501 * @i: buffer index.
1502 */
1503 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1504 struct stmmac_dma_conf *dma_conf,
1505 u32 queue, int i)
1506 {
1507 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1508
1509 if (tx_q->tx_skbuff_dma[i].buf &&
1510 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1511 if (tx_q->tx_skbuff_dma[i].map_as_page)
1512 dma_unmap_page(priv->device,
1513 tx_q->tx_skbuff_dma[i].buf,
1514 tx_q->tx_skbuff_dma[i].len,
1515 DMA_TO_DEVICE);
1516 else
1517 dma_unmap_single(priv->device,
1518 tx_q->tx_skbuff_dma[i].buf,
1519 tx_q->tx_skbuff_dma[i].len,
1520 DMA_TO_DEVICE);
1521 }
1522
1523 if (tx_q->xdpf[i] &&
1524 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1525 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1526 xdp_return_frame(tx_q->xdpf[i]);
1527 tx_q->xdpf[i] = NULL;
1528 }
1529
1530 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1531 tx_q->xsk_frames_done++;
1532
1533 if (tx_q->tx_skbuff[i] &&
1534 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1535 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1536 tx_q->tx_skbuff[i] = NULL;
1537 }
1538
1539 tx_q->tx_skbuff_dma[i].buf = 0;
1540 tx_q->tx_skbuff_dma[i].map_as_page = false;
1541 }
1542
1543 /**
1544 * dma_free_rx_skbufs - free RX dma buffers
1545 * @priv: private structure
1546 * @dma_conf: structure to take the dma data
1547 * @queue: RX queue index
1548 */
1549 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1550 struct stmmac_dma_conf *dma_conf,
1551 u32 queue)
1552 {
1553 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1554 int i;
1555
1556 for (i = 0; i < dma_conf->dma_rx_size; i++)
1557 stmmac_free_rx_buffer(priv, rx_q, i);
1558 }
1559
1560 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1561 struct stmmac_dma_conf *dma_conf,
1562 u32 queue, gfp_t flags)
1563 {
1564 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1565 int i;
1566
1567 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1568 struct dma_desc *p;
1569 int ret;
1570
1571 if (priv->extend_desc)
1572 p = &((rx_q->dma_erx + i)->basic);
1573 else
1574 p = rx_q->dma_rx + i;
1575
1576 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1577 queue);
1578 if (ret)
1579 return ret;
1580
1581 rx_q->buf_alloc_num++;
1582 }
1583
1584 return 0;
1585 }
1586
1587 /**
1588 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1589 * @priv: private structure
1590 * @dma_conf: structure to take the dma data
1591 * @queue: RX queue index
1592 */
1593 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1594 struct stmmac_dma_conf *dma_conf,
1595 u32 queue)
1596 {
1597 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1598 int i;
1599
1600 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1601 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1602
1603 if (!buf->xdp)
1604 continue;
1605
1606 xsk_buff_free(buf->xdp);
1607 buf->xdp = NULL;
1608 }
1609 }
1610
1611 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1612 struct stmmac_dma_conf *dma_conf,
1613 u32 queue)
1614 {
1615 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1616 int i;
1617
1618 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1619 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1620 * use this macro to make sure there are no size violations.
1621 */
1622 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1623
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 struct stmmac_rx_buffer *buf;
1626 dma_addr_t dma_addr;
1627 struct dma_desc *p;
1628
1629 if (priv->extend_desc)
1630 p = (struct dma_desc *)(rx_q->dma_erx + i);
1631 else
1632 p = rx_q->dma_rx + i;
1633
1634 buf = &rx_q->buf_pool[i];
1635
1636 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1637 if (!buf->xdp)
1638 return -ENOMEM;
1639
1640 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1641 stmmac_set_desc_addr(priv, p, dma_addr);
1642 rx_q->buf_alloc_num++;
1643 }
1644
1645 return 0;
1646 }
1647
1648 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1649 {
1650 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1651 return NULL;
1652
1653 return xsk_get_pool_from_qid(priv->dev, queue);
1654 }
1655
1656 /**
1657 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1658 * @priv: driver private structure
1659 * @dma_conf: structure to take the dma data
1660 * @queue: RX queue index
1661 * @flags: gfp flag.
1662 * Description: this function initializes the DMA RX descriptors
1663 * and allocates the socket buffers. It supports the chained and ring
1664 * modes.
1665 */
1666 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1667 struct stmmac_dma_conf *dma_conf,
1668 u32 queue, gfp_t flags)
1669 {
1670 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1671 int ret;
1672
1673 netif_dbg(priv, probe, priv->dev,
1674 "(%s) dma_rx_phy=0x%08x\n", __func__,
1675 (u32)rx_q->dma_rx_phy);
1676
1677 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1678
1679 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1680
1681 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1682
1683 if (rx_q->xsk_pool) {
1684 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1685 MEM_TYPE_XSK_BUFF_POOL,
1686 NULL));
1687 netdev_info(priv->dev,
1688 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1689 rx_q->queue_index);
1690 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1691 } else {
1692 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1693 MEM_TYPE_PAGE_POOL,
1694 rx_q->page_pool));
1695 netdev_info(priv->dev,
1696 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1697 rx_q->queue_index);
1698 }
1699
1700 if (rx_q->xsk_pool) {
1701 /* RX XDP ZC buffer pool may not be populated, e.g.
1702 * xdpsock TX-only.
1703 */
1704 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1705 } else {
1706 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1707 if (ret < 0)
1708 return -ENOMEM;
1709 }
1710
1711 /* Setup the chained descriptor addresses */
1712 if (priv->mode == STMMAC_CHAIN_MODE) {
1713 if (priv->extend_desc)
1714 stmmac_mode_init(priv, rx_q->dma_erx,
1715 rx_q->dma_rx_phy,
1716 dma_conf->dma_rx_size, 1);
1717 else
1718 stmmac_mode_init(priv, rx_q->dma_rx,
1719 rx_q->dma_rx_phy,
1720 dma_conf->dma_rx_size, 0);
1721 }
1722
1723 return 0;
1724 }
1725
1726 static int init_dma_rx_desc_rings(struct net_device *dev,
1727 struct stmmac_dma_conf *dma_conf,
1728 gfp_t flags)
1729 {
1730 struct stmmac_priv *priv = netdev_priv(dev);
1731 u32 rx_count = priv->plat->rx_queues_to_use;
1732 int queue;
1733 int ret;
1734
1735 /* RX INITIALIZATION */
1736 netif_dbg(priv, probe, priv->dev,
1737 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1738
1739 for (queue = 0; queue < rx_count; queue++) {
1740 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1741 if (ret)
1742 goto err_init_rx_buffers;
1743 }
1744
1745 return 0;
1746
1747 err_init_rx_buffers:
1748 while (queue >= 0) {
1749 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1750
1751 if (rx_q->xsk_pool)
1752 dma_free_rx_xskbufs(priv, dma_conf, queue);
1753 else
1754 dma_free_rx_skbufs(priv, dma_conf, queue);
1755
1756 rx_q->buf_alloc_num = 0;
1757 rx_q->xsk_pool = NULL;
1758
1759 queue--;
1760 }
1761
1762 return ret;
1763 }
1764
1765 /**
1766 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1767 * @priv: driver private structure
1768 * @dma_conf: structure to take the dma data
1769 * @queue: TX queue index
1770 * Description: this function initializes the DMA TX descriptors
1771 * and clears the per-descriptor TX buffer bookkeeping. It supports
1772 * the chained and ring modes.
1773 */
1774 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1775 struct stmmac_dma_conf *dma_conf,
1776 u32 queue)
1777 {
1778 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1779 int i;
1780
1781 netif_dbg(priv, probe, priv->dev,
1782 "(%s) dma_tx_phy=0x%08x\n", __func__,
1783 (u32)tx_q->dma_tx_phy);
1784
1785 /* Setup the chained descriptor addresses */
1786 if (priv->mode == STMMAC_CHAIN_MODE) {
1787 if (priv->extend_desc)
1788 stmmac_mode_init(priv, tx_q->dma_etx,
1789 tx_q->dma_tx_phy,
1790 dma_conf->dma_tx_size, 1);
1791 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1792 stmmac_mode_init(priv, tx_q->dma_tx,
1793 tx_q->dma_tx_phy,
1794 dma_conf->dma_tx_size, 0);
1795 }
1796
1797 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1798
1799 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1800 struct dma_desc *p;
1801
1802 if (priv->extend_desc)
1803 p = &((tx_q->dma_etx + i)->basic);
1804 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1805 p = &((tx_q->dma_entx + i)->basic);
1806 else
1807 p = tx_q->dma_tx + i;
1808
1809 stmmac_clear_desc(priv, p);
1810
1811 tx_q->tx_skbuff_dma[i].buf = 0;
1812 tx_q->tx_skbuff_dma[i].map_as_page = false;
1813 tx_q->tx_skbuff_dma[i].len = 0;
1814 tx_q->tx_skbuff_dma[i].last_segment = false;
1815 tx_q->tx_skbuff[i] = NULL;
1816 }
1817
1818 return 0;
1819 }
1820
1821 static int init_dma_tx_desc_rings(struct net_device *dev,
1822 struct stmmac_dma_conf *dma_conf)
1823 {
1824 struct stmmac_priv *priv = netdev_priv(dev);
1825 u32 tx_queue_cnt;
1826 u32 queue;
1827
1828 tx_queue_cnt = priv->plat->tx_queues_to_use;
1829
1830 for (queue = 0; queue < tx_queue_cnt; queue++)
1831 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1832
1833 return 0;
1834 }
1835
1836 /**
1837 * init_dma_desc_rings - init the RX/TX descriptor rings
1838 * @dev: net device structure
1839 * @dma_conf: structure to take the dma data
1840 * @flags: gfp flag.
1841 * Description: this function initializes the DMA RX/TX descriptors
1842 * and allocates the socket buffers. It supports the chained and ring
1843 * modes.
1844 */
1845 static int init_dma_desc_rings(struct net_device *dev,
1846 struct stmmac_dma_conf *dma_conf,
1847 gfp_t flags)
1848 {
1849 struct stmmac_priv *priv = netdev_priv(dev);
1850 int ret;
1851
1852 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1853 if (ret)
1854 return ret;
1855
1856 ret = init_dma_tx_desc_rings(dev, dma_conf);
1857
1858 stmmac_clear_descriptors(priv, dma_conf);
1859
1860 if (netif_msg_hw(priv))
1861 stmmac_display_rings(priv, dma_conf);
1862
1863 return ret;
1864 }
1865
1866 /**
1867 * dma_free_tx_skbufs - free TX dma buffers
1868 * @priv: private structure
1869 * @dma_conf: structure to take the dma data
1870 * @queue: TX queue index
1871 */
1872 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1873 struct stmmac_dma_conf *dma_conf,
1874 u32 queue)
1875 {
1876 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1877 int i;
1878
1879 tx_q->xsk_frames_done = 0;
1880
1881 for (i = 0; i < dma_conf->dma_tx_size; i++)
1882 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1883
1884 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1885 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1886 tx_q->xsk_frames_done = 0;
1887 tx_q->xsk_pool = NULL;
1888 }
1889 }
1890
1891 /**
1892 * stmmac_free_tx_skbufs - free TX skb buffers
1893 * @priv: private structure
1894 */
1895 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1896 {
1897 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1898 u32 queue;
1899
1900 for (queue = 0; queue < tx_queue_cnt; queue++)
1901 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1902 }
1903
1904 /**
1905 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1906 * @priv: private structure
1907 * @dma_conf: structure to take the dma data
1908 * @queue: RX queue index
1909 */
1910 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1911 struct stmmac_dma_conf *dma_conf,
1912 u32 queue)
1913 {
1914 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1915
1916 /* Release the DMA RX socket buffers */
1917 if (rx_q->xsk_pool)
1918 dma_free_rx_xskbufs(priv, dma_conf, queue);
1919 else
1920 dma_free_rx_skbufs(priv, dma_conf, queue);
1921
1922 rx_q->buf_alloc_num = 0;
1923 rx_q->xsk_pool = NULL;
1924
1925 /* Free DMA regions of consistent memory previously allocated */
1926 if (!priv->extend_desc)
1927 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1928 sizeof(struct dma_desc),
1929 rx_q->dma_rx, rx_q->dma_rx_phy);
1930 else
1931 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1932 sizeof(struct dma_extended_desc),
1933 rx_q->dma_erx, rx_q->dma_rx_phy);
1934
1935 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1936 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1937
1938 kfree(rx_q->buf_pool);
1939 if (rx_q->page_pool)
1940 page_pool_destroy(rx_q->page_pool);
1941 }
1942
1943 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1944 struct stmmac_dma_conf *dma_conf)
1945 {
1946 u32 rx_count = priv->plat->rx_queues_to_use;
1947 u32 queue;
1948
1949 /* Free RX queue resources */
1950 for (queue = 0; queue < rx_count; queue++)
1951 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1952 }
1953
1954 /**
1955 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1956 * @priv: private structure
1957 * @dma_conf: structure to take the dma data
1958 * @queue: TX queue index
1959 */
1960 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1961 struct stmmac_dma_conf *dma_conf,
1962 u32 queue)
1963 {
1964 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1965 size_t size;
1966 void *addr;
1967
1968 /* Release the DMA TX socket buffers */
1969 dma_free_tx_skbufs(priv, dma_conf, queue);
1970
1971 if (priv->extend_desc) {
1972 size = sizeof(struct dma_extended_desc);
1973 addr = tx_q->dma_etx;
1974 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1975 size = sizeof(struct dma_edesc);
1976 addr = tx_q->dma_entx;
1977 } else {
1978 size = sizeof(struct dma_desc);
1979 addr = tx_q->dma_tx;
1980 }
1981
1982 size *= dma_conf->dma_tx_size;
1983
1984 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1985
1986 kfree(tx_q->tx_skbuff_dma);
1987 kfree(tx_q->tx_skbuff);
1988 }
1989
1990 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1991 struct stmmac_dma_conf *dma_conf)
1992 {
1993 u32 tx_count = priv->plat->tx_queues_to_use;
1994 u32 queue;
1995
1996 /* Free TX queue resources */
1997 for (queue = 0; queue < tx_count; queue++)
1998 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1999 }
2000
2001 /**
2002 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2003 * @priv: private structure
2004 * @dma_conf: structure to take the dma data
2005 * @queue: RX queue index
2006 * Description: according to which descriptor can be used (extended or basic)
2007 * this function allocates the resources for the RX path of the given queue:
2008 * the RX descriptor ring, the buffer bookkeeping array and the page pool
2009 * backing the receive buffers; it also registers the XDP RX queue info.
2010 */
2011 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2012 struct stmmac_dma_conf *dma_conf,
2013 u32 queue)
2014 {
2015 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2016 struct stmmac_channel *ch = &priv->channel[queue];
2017 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2018 struct page_pool_params pp_params = { 0 };
2019 unsigned int num_pages;
2020 unsigned int napi_id;
2021 int ret;
2022
2023 rx_q->queue_index = queue;
2024 rx_q->priv_data = priv;
2025
2026 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2027 pp_params.pool_size = dma_conf->dma_rx_size;
2028 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2029 pp_params.order = ilog2(num_pages);
2030 pp_params.nid = dev_to_node(priv->device);
2031 pp_params.dev = priv->device;
2032 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2033 pp_params.offset = stmmac_rx_offset(priv);
2034 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2035
2036 rx_q->page_pool = page_pool_create(&pp_params);
2037 if (IS_ERR(rx_q->page_pool)) {
2038 ret = PTR_ERR(rx_q->page_pool);
2039 rx_q->page_pool = NULL;
2040 return ret;
2041 }
2042
2043 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2044 sizeof(*rx_q->buf_pool),
2045 GFP_KERNEL);
2046 if (!rx_q->buf_pool)
2047 return -ENOMEM;
2048
2049 if (priv->extend_desc) {
2050 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2051 dma_conf->dma_rx_size *
2052 sizeof(struct dma_extended_desc),
2053 &rx_q->dma_rx_phy,
2054 GFP_KERNEL);
2055 if (!rx_q->dma_erx)
2056 return -ENOMEM;
2057
2058 } else {
2059 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2060 dma_conf->dma_rx_size *
2061 sizeof(struct dma_desc),
2062 &rx_q->dma_rx_phy,
2063 GFP_KERNEL);
2064 if (!rx_q->dma_rx)
2065 return -ENOMEM;
2066 }
2067
2068 if (stmmac_xdp_is_enabled(priv) &&
2069 test_bit(queue, priv->af_xdp_zc_qps))
2070 napi_id = ch->rxtx_napi.napi_id;
2071 else
2072 napi_id = ch->rx_napi.napi_id;
2073
2074 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2075 rx_q->queue_index,
2076 napi_id);
2077 if (ret) {
2078 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2079 return -EINVAL;
2080 }
2081
2082 return 0;
2083 }
2084
2085 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2086 struct stmmac_dma_conf *dma_conf)
2087 {
2088 u32 rx_count = priv->plat->rx_queues_to_use;
2089 u32 queue;
2090 int ret;
2091
2092 /* RX queues buffers and DMA */
2093 for (queue = 0; queue < rx_count; queue++) {
2094 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2095 if (ret)
2096 goto err_dma;
2097 }
2098
2099 return 0;
2100
2101 err_dma:
2102 free_dma_rx_desc_resources(priv, dma_conf);
2103
2104 return ret;
2105 }
2106
2107 /**
2108 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2109 * @priv: private structure
2110 * @dma_conf: structure to take the dma data
2111 * @queue: TX queue index
2112 * Description: according to which descriptor can be used (extended or basic)
2113 * this function allocates the resources for the TX path of the given queue:
2114 * the TX descriptor ring plus the tx_skbuff and tx_skbuff_dma bookkeeping
2115 * arrays.
2116 */
2117 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2118 struct stmmac_dma_conf *dma_conf,
2119 u32 queue)
2120 {
2121 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2122 size_t size;
2123 void *addr;
2124
2125 tx_q->queue_index = queue;
2126 tx_q->priv_data = priv;
2127
2128 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2129 sizeof(*tx_q->tx_skbuff_dma),
2130 GFP_KERNEL);
2131 if (!tx_q->tx_skbuff_dma)
2132 return -ENOMEM;
2133
2134 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2135 sizeof(struct sk_buff *),
2136 GFP_KERNEL);
2137 if (!tx_q->tx_skbuff)
2138 return -ENOMEM;
2139
2140 if (priv->extend_desc)
2141 size = sizeof(struct dma_extended_desc);
2142 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2143 size = sizeof(struct dma_edesc);
2144 else
2145 size = sizeof(struct dma_desc);
2146
2147 size *= dma_conf->dma_tx_size;
2148
2149 addr = dma_alloc_coherent(priv->device, size,
2150 &tx_q->dma_tx_phy, GFP_KERNEL);
2151 if (!addr)
2152 return -ENOMEM;
2153
2154 if (priv->extend_desc)
2155 tx_q->dma_etx = addr;
2156 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2157 tx_q->dma_entx = addr;
2158 else
2159 tx_q->dma_tx = addr;
2160
2161 return 0;
2162 }
2163
2164 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2165 struct stmmac_dma_conf *dma_conf)
2166 {
2167 u32 tx_count = priv->plat->tx_queues_to_use;
2168 u32 queue;
2169 int ret;
2170
2171 /* TX queues buffers and DMA */
2172 for (queue = 0; queue < tx_count; queue++) {
2173 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2174 if (ret)
2175 goto err_dma;
2176 }
2177
2178 return 0;
2179
2180 err_dma:
2181 free_dma_tx_desc_resources(priv, dma_conf);
2182 return ret;
2183 }
2184
2185 /**
2186 * alloc_dma_desc_resources - alloc TX/RX resources.
2187 * @priv: private structure
2188 * @dma_conf: structure to take the dma data
2189 * Description: according to which descriptor can be used (extended or basic)
2190 * this function allocates the resources for the TX and RX paths. In case of
2191 * reception, for example, it pre-allocates the RX buffers in order to
2192 * allow a zero-copy mechanism.
2193 */
2194 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2195 struct stmmac_dma_conf *dma_conf)
2196 {
2197 /* RX Allocation */
2198 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2199
2200 if (ret)
2201 return ret;
2202
2203 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2204
2205 return ret;
2206 }
2207
2208 /**
2209 * free_dma_desc_resources - free dma desc resources
2210 * @priv: private structure
2211 * @dma_conf: structure to take the dma data
2212 */
2213 static void free_dma_desc_resources(struct stmmac_priv *priv,
2214 struct stmmac_dma_conf *dma_conf)
2215 {
2216 /* Release the DMA TX socket buffers */
2217 free_dma_tx_desc_resources(priv, dma_conf);
2218
2219 /* Release the DMA RX socket buffers later
2220 * to ensure all pending XDP_TX buffers are returned.
2221 */
2222 free_dma_rx_desc_resources(priv, dma_conf);
2223 }
2224
2225 /**
2226 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2227 * @priv: driver private structure
2228 * Description: It is used for enabling the rx queues in the MAC
2229 */
2230 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2231 {
2232 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2233 int queue;
2234 u8 mode;
2235
2236 for (queue = 0; queue < rx_queues_count; queue++) {
2237 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2238 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2239 }
2240 }
2241
2242 /**
2243 * stmmac_start_rx_dma - start RX DMA channel
2244 * @priv: driver private structure
2245 * @chan: RX channel index
2246 * Description:
2247 * This starts a RX DMA channel
2248 */
2249 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2250 {
2251 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2252 stmmac_start_rx(priv, priv->ioaddr, chan);
2253 }
2254
2255 /**
2256 * stmmac_start_tx_dma - start TX DMA channel
2257 * @priv: driver private structure
2258 * @chan: TX channel index
2259 * Description:
2260 * This starts a TX DMA channel
2261 */
2262 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2265 stmmac_start_tx(priv, priv->ioaddr, chan);
2266 }
2267
2268 /**
2269 * stmmac_stop_rx_dma - stop RX DMA channel
2270 * @priv: driver private structure
2271 * @chan: RX channel index
2272 * Description:
2273 * This stops a RX DMA channel
2274 */
2275 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2276 {
2277 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2278 stmmac_stop_rx(priv, priv->ioaddr, chan);
2279 }
2280
2281 /**
2282 * stmmac_stop_tx_dma - stop TX DMA channel
2283 * @priv: driver private structure
2284 * @chan: TX channel index
2285 * Description:
2286 * This stops a TX DMA channel
2287 */
2288 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2289 {
2290 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2291 stmmac_stop_tx(priv, priv->ioaddr, chan);
2292 }
2293
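/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: walks every DMA CSR channel and re-enables both its RX and
 * TX interrupts under the per-channel lock.
 */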
2294 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2295 {
2296 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2299 u32 chan;
2300
2301 for (chan = 0; chan < dma_csr_ch; chan++) {
2302 struct stmmac_channel *ch = &priv->channel[chan];
2303 unsigned long flags;
2304
2305 spin_lock_irqsave(&ch->lock, flags);
2306 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2307 spin_unlock_irqrestore(&ch->lock, flags);
2308 }
2309 }
2310
2311 /**
2312 * stmmac_start_all_dma - start all RX and TX DMA channels
2313 * @priv: driver private structure
2314 * Description:
2315 * This starts all the RX and TX DMA channels
2316 */
2317 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2318 {
2319 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2320 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2321 u32 chan = 0;
2322
2323 for (chan = 0; chan < rx_channels_count; chan++)
2324 stmmac_start_rx_dma(priv, chan);
2325
2326 for (chan = 0; chan < tx_channels_count; chan++)
2327 stmmac_start_tx_dma(priv, chan);
2328 }
2329
2330 /**
2331 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2332 * @priv: driver private structure
2333 * Description:
2334 * This stops the RX and TX DMA channels
2335 */
2336 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2337 {
2338 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2339 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2340 u32 chan = 0;
2341
2342 for (chan = 0; chan < rx_channels_count; chan++)
2343 stmmac_stop_rx_dma(priv, chan);
2344
2345 for (chan = 0; chan < tx_channels_count; chan++)
2346 stmmac_stop_tx_dma(priv, chan);
2347 }
2348
2349 /**
2350 * stmmac_dma_operation_mode - HW DMA operation mode
2351 * @priv: driver private structure
2352 * Description: it is used for configuring the DMA operation mode register in
2353 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2354 */
2355 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2356 {
2357 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2358 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2359 int rxfifosz = priv->plat->rx_fifo_size;
2360 int txfifosz = priv->plat->tx_fifo_size;
2361 u32 txmode = 0;
2362 u32 rxmode = 0;
2363 u32 chan = 0;
2364 u8 qmode = 0;
2365
2366 if (rxfifosz == 0)
2367 rxfifosz = priv->dma_cap.rx_fifo_size;
2368 if (txfifosz == 0)
2369 txfifosz = priv->dma_cap.tx_fifo_size;
2370
2371 /* Adjust for real per queue fifo size */
2372 rxfifosz /= rx_channels_count;
2373 txfifosz /= tx_channels_count;
2374
2375 if (priv->plat->force_thresh_dma_mode) {
2376 txmode = tc;
2377 rxmode = tc;
2378 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2379 /*
2380 * In case of GMAC, SF mode can be enabled
2381 * to perform the TX COE in HW. This depends on:
2382 * 1) TX COE being actually supported;
2383 * 2) there being no buggy Jumbo frame support
2384 * that requires not inserting the csum in the TDES.
2385 */
2386 txmode = SF_DMA_MODE;
2387 rxmode = SF_DMA_MODE;
2388 priv->xstats.threshold = SF_DMA_MODE;
2389 } else {
2390 txmode = tc;
2391 rxmode = SF_DMA_MODE;
2392 }
2393
2394 /* configure all channels */
2395 for (chan = 0; chan < rx_channels_count; chan++) {
2396 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2397 u32 buf_size;
2398
2399 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2400
2401 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2402 rxfifosz, qmode);
2403
2404 if (rx_q->xsk_pool) {
2405 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2406 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2407 buf_size,
2408 chan);
2409 } else {
2410 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2411 priv->dma_conf.dma_buf_sz,
2412 chan);
2413 }
2414 }
2415
2416 for (chan = 0; chan < tx_channels_count; chan++) {
2417 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2418
2419 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2420 txfifosz, qmode);
2421 }
2422 }
2423
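/**
 * stmmac_xdp_xmit_zc - submit pending AF_XDP (zero-copy) TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peeks descriptors from the XSK pool and places them on the
 * TX ring shared with the slow path. Returns true when the budget was not
 * exhausted and no more XSK descriptors are pending.
 */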
2424 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2425 {
2426 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2427 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2428 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2429 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2430 unsigned int entry = tx_q->cur_tx;
2431 struct dma_desc *tx_desc = NULL;
2432 struct xdp_desc xdp_desc;
2433 bool work_done = true;
2434 u32 tx_set_ic_bit = 0;
2435
2436 /* Avoids TX time-out as we are sharing with slow path */
2437 txq_trans_cond_update(nq);
2438
2439 budget = min(budget, stmmac_tx_avail(priv, queue));
2440
2441 while (budget-- > 0) {
2442 dma_addr_t dma_addr;
2443 bool set_ic;
2444
2445 /* We share the TX ring with the slow path, so stop XSK TX desc
2446 * submission when the available ring space drops below the threshold.
2447 */
2448 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2449 !netif_carrier_ok(priv->dev)) {
2450 work_done = false;
2451 break;
2452 }
2453
2454 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2455 break;
2456
2457 if (likely(priv->extend_desc))
2458 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2459 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2460 tx_desc = &tx_q->dma_entx[entry].basic;
2461 else
2462 tx_desc = tx_q->dma_tx + entry;
2463
2464 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2465 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2466
2467 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2468
2469 /* To return XDP buffer to XSK pool, we simply call
2470 * xsk_tx_completed(), so we don't need to fill up
2471 * 'buf' and 'xdpf'.
2472 */
2473 tx_q->tx_skbuff_dma[entry].buf = 0;
2474 tx_q->xdpf[entry] = NULL;
2475
2476 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2477 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2478 tx_q->tx_skbuff_dma[entry].last_segment = true;
2479 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2480
2481 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2482
2483 tx_q->tx_count_frames++;
2484
2485 if (!priv->tx_coal_frames[queue])
2486 set_ic = false;
2487 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2488 set_ic = true;
2489 else
2490 set_ic = false;
2491
2492 if (set_ic) {
2493 tx_q->tx_count_frames = 0;
2494 stmmac_set_tx_ic(priv, tx_desc);
2495 tx_set_ic_bit++;
2496 }
2497
2498 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2499 true, priv->mode, true, true,
2500 xdp_desc.len);
2501
2502 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2503
2504 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2505 entry = tx_q->cur_tx;
2506 }
2507 u64_stats_update_begin(&txq_stats->napi_syncp);
2508 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2509 u64_stats_update_end(&txq_stats->napi_syncp);
2510
2511 if (tx_desc) {
2512 stmmac_flush_tx_descriptors(priv, queue);
2513 xsk_tx_release(pool);
2514 }
2515
2516 /* Return true if both of the following conditions are met:
2517 * a) TX budget is still available
2518 * b) work_done == true, i.e. the XSK TX desc peek came back empty
2519 * (no more pending XSK TX for transmission)
2520 */
2521 return !!budget && work_done;
2522 }
2523
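/**
 * stmmac_bump_dma_threshold - bump the TX DMA threshold for a channel
 * @priv: driver private structure
 * @chan: channel index
 * Description: when running in threshold mode, increase the threshold
 * control value (tc) by 64 (while tc <= 256) and reprogram the DMA
 * operation mode for this channel.
 */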
2524 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2525 {
2526 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2527 tc += 64;
2528
2529 if (priv->plat->force_thresh_dma_mode)
2530 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2531 else
2532 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2533 chan);
2534
2535 priv->xstats.threshold = tc;
2536 }
2537 }
2538
2539 /**
2540 * stmmac_tx_clean - to manage the transmission completion
2541 * @priv: driver private structure
2542 * @budget: napi budget limiting this functions packet handling
2543 * @queue: TX queue index
2544 * Description: it reclaims the transmit resources after transmission completes.
2545 */
2546 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2547 {
2548 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2549 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2550 unsigned int bytes_compl = 0, pkts_compl = 0;
2551 unsigned int entry, xmits = 0, count = 0;
2552 u32 tx_packets = 0, tx_errors = 0;
2553
2554 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2555
2556 tx_q->xsk_frames_done = 0;
2557
2558 entry = tx_q->dirty_tx;
2559
2560 /* Try to clean all completed TX frames in one shot */
2561 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2562 struct xdp_frame *xdpf;
2563 struct sk_buff *skb;
2564 struct dma_desc *p;
2565 int status;
2566
2567 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2568 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2569 xdpf = tx_q->xdpf[entry];
2570 skb = NULL;
2571 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2572 xdpf = NULL;
2573 skb = tx_q->tx_skbuff[entry];
2574 } else {
2575 xdpf = NULL;
2576 skb = NULL;
2577 }
2578
2579 if (priv->extend_desc)
2580 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2581 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2582 p = &tx_q->dma_entx[entry].basic;
2583 else
2584 p = tx_q->dma_tx + entry;
2585
2586 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2587 /* Check if the descriptor is owned by the DMA */
2588 if (unlikely(status & tx_dma_own))
2589 break;
2590
2591 count++;
2592
2593 /* Make sure descriptor fields are read after reading
2594 * the own bit.
2595 */
2596 dma_rmb();
2597
2598 /* Just consider the last segment and ...*/
2599 if (likely(!(status & tx_not_ls))) {
2600 /* ... verify the status error condition */
2601 if (unlikely(status & tx_err)) {
2602 tx_errors++;
2603 if (unlikely(status & tx_err_bump_tc))
2604 stmmac_bump_dma_threshold(priv, queue);
2605 } else {
2606 tx_packets++;
2607 }
2608 if (skb)
2609 stmmac_get_tx_hwtstamp(priv, p, skb);
2610 }
2611
2612 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2613 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2614 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2615 dma_unmap_page(priv->device,
2616 tx_q->tx_skbuff_dma[entry].buf,
2617 tx_q->tx_skbuff_dma[entry].len,
2618 DMA_TO_DEVICE);
2619 else
2620 dma_unmap_single(priv->device,
2621 tx_q->tx_skbuff_dma[entry].buf,
2622 tx_q->tx_skbuff_dma[entry].len,
2623 DMA_TO_DEVICE);
2624 tx_q->tx_skbuff_dma[entry].buf = 0;
2625 tx_q->tx_skbuff_dma[entry].len = 0;
2626 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2627 }
2628
2629 stmmac_clean_desc3(priv, tx_q, p);
2630
2631 tx_q->tx_skbuff_dma[entry].last_segment = false;
2632 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2633
2634 if (xdpf &&
2635 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2636 xdp_return_frame_rx_napi(xdpf);
2637 tx_q->xdpf[entry] = NULL;
2638 }
2639
2640 if (xdpf &&
2641 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2642 xdp_return_frame(xdpf);
2643 tx_q->xdpf[entry] = NULL;
2644 }
2645
2646 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2647 tx_q->xsk_frames_done++;
2648
2649 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2650 if (likely(skb)) {
2651 pkts_compl++;
2652 bytes_compl += skb->len;
2653 dev_consume_skb_any(skb);
2654 tx_q->tx_skbuff[entry] = NULL;
2655 }
2656 }
2657
2658 stmmac_release_tx_desc(priv, p, priv->mode);
2659
2660 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2661 }
2662 tx_q->dirty_tx = entry;
2663
2664 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2665 pkts_compl, bytes_compl);
2666
2667 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2668 queue))) &&
2669 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2670
2671 netif_dbg(priv, tx_done, priv->dev,
2672 "%s: restart transmit\n", __func__);
2673 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2674 }
2675
2676 if (tx_q->xsk_pool) {
2677 bool work_done;
2678
2679 if (tx_q->xsk_frames_done)
2680 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2681
2682 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2683 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2684
2685 /* For XSK TX, we try to send as many as possible.
2686 * If XSK work done (XSK TX desc empty and budget still
2687 * available), return "budget - 1" to reenable TX IRQ.
2688 * Else, return "budget" to make NAPI continue polling.
2689 */
2690 work_done = stmmac_xdp_xmit_zc(priv, queue,
2691 STMMAC_XSK_TX_BUDGET_MAX);
2692 if (work_done)
2693 xmits = budget - 1;
2694 else
2695 xmits = budget;
2696 }
2697
2698 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2699 priv->eee_sw_timer_en) {
2700 if (stmmac_enable_eee_mode(priv))
2701 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2702 }
2703
2704 /* We still have pending packets, let's call for a new scheduling */
2705 if (tx_q->dirty_tx != tx_q->cur_tx)
2706 stmmac_tx_timer_arm(priv, queue);
2707
2708 u64_stats_update_begin(&txq_stats->napi_syncp);
2709 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2710 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2711 u64_stats_inc(&txq_stats->napi.tx_clean);
2712 u64_stats_update_end(&txq_stats->napi_syncp);
2713
2714 priv->xstats.tx_errors += tx_errors;
2715
2716 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2717
2718 /* Combine decisions from TX clean and XSK TX */
2719 return max(count, xmits);
2720 }
2721
2722 /**
2723 * stmmac_tx_err - to manage the tx error
2724 * @priv: driver private structure
2725 * @chan: channel index
2726 * Description: it cleans the descriptors and restarts the transmission
2727 * in case of transmission errors.
2728 */
2729 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2730 {
2731 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2732
2733 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2734
2735 stmmac_stop_tx_dma(priv, chan);
2736 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2737 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2738 stmmac_reset_tx_queue(priv, chan);
2739 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2740 tx_q->dma_tx_phy, chan);
2741 stmmac_start_tx_dma(priv, chan);
2742
2743 priv->xstats.tx_errors++;
2744 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2745 }
2746
2747 /**
2748 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2749 * @priv: driver private structure
2750 * @txmode: TX operating mode
2751 * @rxmode: RX operating mode
2752 * @chan: channel index
2753 * Description: it is used for configuring the DMA operation mode at
2754 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2755 * mode.
2756 */
2757 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2758 u32 rxmode, u32 chan)
2759 {
2760 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2761 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2762 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2763 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2764 int rxfifosz = priv->plat->rx_fifo_size;
2765 int txfifosz = priv->plat->tx_fifo_size;
2766
2767 if (rxfifosz == 0)
2768 rxfifosz = priv->dma_cap.rx_fifo_size;
2769 if (txfifosz == 0)
2770 txfifosz = priv->dma_cap.tx_fifo_size;
2771
2772 /* Adjust for real per queue fifo size */
2773 rxfifosz /= rx_channels_count;
2774 txfifosz /= tx_channels_count;
2775
2776 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2777 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2778 }
2779
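/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: reads the safety feature IRQ status and, on an actual
 * error, triggers the global error handling. Returns true if a global
 * error was raised.
 */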
2780 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2781 {
2782 int ret;
2783
2784 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2785 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2786 if (ret && (ret != -EINVAL)) {
2787 stmmac_global_err(priv);
2788 return true;
2789 }
2790
2791 return false;
2792 }
2793
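/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction (RX, TX or both)
 * Description: reads the DMA interrupt status for the channel and, when RX
 * or TX work is pending, disables the corresponding DMA interrupt and
 * schedules the matching NAPI instance (queues in XSK zero-copy mode use
 * the combined rxtx NAPI). Returns the raw interrupt status.
 */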
2794 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2795 {
2796 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2797 &priv->xstats, chan, dir);
2798 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2799 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2800 struct stmmac_channel *ch = &priv->channel[chan];
2801 struct napi_struct *rx_napi;
2802 struct napi_struct *tx_napi;
2803 unsigned long flags;
2804
2805 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2806 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2807
2808 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2809 if (napi_schedule_prep(rx_napi)) {
2810 spin_lock_irqsave(&ch->lock, flags);
2811 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2812 spin_unlock_irqrestore(&ch->lock, flags);
2813 __napi_schedule(rx_napi);
2814 }
2815 }
2816
2817 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2818 if (napi_schedule_prep(tx_napi)) {
2819 spin_lock_irqsave(&ch->lock, flags);
2820 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2821 spin_unlock_irqrestore(&ch->lock, flags);
2822 __napi_schedule(tx_napi);
2823 }
2824 }
2825
2826 return status;
2827 }
2828
2829 /**
2830 * stmmac_dma_interrupt - DMA ISR
2831 * @priv: driver private structure
2832 * Description: this is the DMA ISR. It is called by the main ISR.
2833 * It calls the dwmac dma routine and schedules the poll method when some
2834 * work can be done.
2835 */
2836 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2837 {
2838 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2839 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2840 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2841 tx_channel_count : rx_channel_count;
2842 u32 chan;
2843 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2844
2845 /* Make sure we never check beyond our status buffer. */
2846 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2847 channels_to_check = ARRAY_SIZE(status);
2848
2849 for (chan = 0; chan < channels_to_check; chan++)
2850 status[chan] = stmmac_napi_check(priv, chan,
2851 DMA_DIR_RXTX);
2852
2853 for (chan = 0; chan < tx_channel_count; chan++) {
2854 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2855 /* Try to bump up the dma threshold on this failure */
2856 stmmac_bump_dma_threshold(priv, chan);
2857 } else if (unlikely(status[chan] == tx_hard_error)) {
2858 stmmac_tx_err(priv, chan);
2859 }
2860 }
2861 }
2862
2863 /**
2864 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2865 * @priv: driver private structure
2866 * Description: this masks the MMC irq, since the counters are managed in SW.
2867 */
2868 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2869 {
2870 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2871 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2872
2873 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2874
2875 if (priv->dma_cap.rmon) {
2876 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2877 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2878 } else
2879 netdev_info(priv->dev, "No MAC Management Counters available\n");
2880 }
2881
2882 /**
2883 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2884 * @priv: driver private structure
2885 * Description:
2886 * new GMAC chip generations have a register that indicates the
2887 * presence of the optional features/functions.
2888 * It can also be used to override the values passed through the
2889 * platform, which are necessary for old MAC10/100 and GMAC chips.
2890 */
2891 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2892 {
2893 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2894 }
2895
2896 /**
2897 * stmmac_check_ether_addr - check if the MAC addr is valid
2898 * @priv: driver private structure
2899 * Description:
2900 * it verifies whether the MAC address is valid; if it is not, it
2901 * generates a random MAC address
2902 */
2903 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2904 {
2905 u8 addr[ETH_ALEN];
2906
2907 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2908 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2909 if (is_valid_ether_addr(addr))
2910 eth_hw_addr_set(priv->dev, addr);
2911 else
2912 eth_hw_addr_random(priv->dev);
2913 dev_info(priv->device, "device MAC address %pM\n",
2914 priv->dev->dev_addr);
2915 }
2916 }
2917
2918 /**
2919 * stmmac_init_dma_engine - DMA init.
2920 * @priv: driver private structure
2921 * Description:
2922 * It initializes the DMA by invoking the specific MAC/GMAC callback.
2923 * Some DMA parameters can be passed from the platform;
2924 * if they are not passed, a default is used for the MAC or GMAC.
2925 */
2926 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2927 {
2928 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2929 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2930 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2931 struct stmmac_rx_queue *rx_q;
2932 struct stmmac_tx_queue *tx_q;
2933 u32 chan = 0;
2934 int atds = 0;
2935 int ret = 0;
2936
2937 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2938 dev_err(priv->device, "Invalid DMA configuration\n");
2939 return -EINVAL;
2940 }
2941
2942 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2943 atds = 1;
2944
2945 ret = stmmac_reset(priv, priv->ioaddr);
2946 if (ret) {
2947 dev_err(priv->device, "Failed to reset the dma\n");
2948 return ret;
2949 }
2950
2951 /* DMA Configuration */
2952 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2953
2954 if (priv->plat->axi)
2955 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2956
2957 /* DMA CSR Channel configuration */
2958 for (chan = 0; chan < dma_csr_ch; chan++) {
2959 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2960 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2961 }
2962
2963 /* DMA RX Channel Configuration */
2964 for (chan = 0; chan < rx_channels_count; chan++) {
2965 rx_q = &priv->dma_conf.rx_queue[chan];
2966
2967 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2968 rx_q->dma_rx_phy, chan);
2969
2970 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2971 (rx_q->buf_alloc_num *
2972 sizeof(struct dma_desc));
2973 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2974 rx_q->rx_tail_addr, chan);
2975 }
2976
2977 /* DMA TX Channel Configuration */
2978 for (chan = 0; chan < tx_channels_count; chan++) {
2979 tx_q = &priv->dma_conf.tx_queue[chan];
2980
2981 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2982 tx_q->dma_tx_phy, chan);
2983
2984 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2985 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2986 tx_q->tx_tail_addr, chan);
2987 }
2988
2989 return ret;
2990 }
2991
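/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: starts the per-queue hrtimer used for TX interrupt
 * mitigation, unless the coalesce timer is disabled (set to zero).
 */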
2992 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2993 {
2994 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2995 u32 tx_coal_timer = priv->tx_coal_timer[queue];
2996
2997 if (!tx_coal_timer)
2998 return;
2999
3000 hrtimer_start(&tx_q->txtimer,
3001 STMMAC_COAL_TIMER(tx_coal_timer),
3002 HRTIMER_MODE_REL);
3003 }
3004
3005 /**
3006 * stmmac_tx_timer - mitigation sw timer for tx.
3007 * @t: pointer to the per-queue TX hrtimer
3008 * Description:
3009 * This is the timer handler; it schedules the TX NAPI, which runs stmmac_tx_clean.
3010 */
3011 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3012 {
3013 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3014 struct stmmac_priv *priv = tx_q->priv_data;
3015 struct stmmac_channel *ch;
3016 struct napi_struct *napi;
3017
3018 ch = &priv->channel[tx_q->queue_index];
3019 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3020
3021 if (likely(napi_schedule_prep(napi))) {
3022 unsigned long flags;
3023
3024 spin_lock_irqsave(&ch->lock, flags);
3025 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3026 spin_unlock_irqrestore(&ch->lock, flags);
3027 __napi_schedule(napi);
3028 }
3029
3030 return HRTIMER_NORESTART;
3031 }
3032
3033 /**
3034 * stmmac_init_coalesce - init mitigation options.
3035 * @priv: driver private structure
3036 * Description:
3037 * This initializes the coalesce parameters: i.e. the timer rate,
3038 * the timer handler and the default frame threshold used for setting
3039 * the interrupt-on-completion bit.
3040 */
3041 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3042 {
3043 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3044 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3045 u32 chan;
3046
3047 for (chan = 0; chan < tx_channel_count; chan++) {
3048 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3049
3050 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3051 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3052
3053 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3054 tx_q->txtimer.function = stmmac_tx_timer;
3055 }
3056
3057 for (chan = 0; chan < rx_channel_count; chan++)
3058 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3059 }
3060
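/**
 * stmmac_set_rings_length - program RX/TX descriptor ring lengths
 * @priv: driver private structure
 * Description: writes the configured TX and RX ring sizes (minus one) to
 * the hardware for every channel in use.
 */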
3061 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3062 {
3063 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3064 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3065 u32 chan;
3066
3067 /* set TX ring length */
3068 for (chan = 0; chan < tx_channels_count; chan++)
3069 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3070 (priv->dma_conf.dma_tx_size - 1), chan);
3071
3072 /* set RX ring length */
3073 for (chan = 0; chan < rx_channels_count; chan++)
3074 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3075 (priv->dma_conf.dma_rx_size - 1), chan);
3076 }
3077
3078 /**
3079 * stmmac_set_tx_queue_weight - Set TX queue weight
3080 * @priv: driver private structure
3081 * Description: It is used for setting the TX queue weights
3082 */
3083 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3084 {
3085 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3086 u32 weight;
3087 u32 queue;
3088
3089 for (queue = 0; queue < tx_queues_count; queue++) {
3090 weight = priv->plat->tx_queues_cfg[queue].weight;
3091 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3092 }
3093 }
3094
3095 /**
3096 * stmmac_configure_cbs - Configure CBS in TX queue
3097 * @priv: driver private structure
3098 * Description: It is used for configuring CBS in AVB TX queues
3099 */
3100 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3101 {
3102 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3103 u32 mode_to_use;
3104 u32 queue;
3105
3106 /* queue 0 is reserved for legacy traffic */
3107 for (queue = 1; queue < tx_queues_count; queue++) {
3108 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3109 if (mode_to_use == MTL_QUEUE_DCB)
3110 continue;
3111
3112 stmmac_config_cbs(priv, priv->hw,
3113 priv->plat->tx_queues_cfg[queue].send_slope,
3114 priv->plat->tx_queues_cfg[queue].idle_slope,
3115 priv->plat->tx_queues_cfg[queue].high_credit,
3116 priv->plat->tx_queues_cfg[queue].low_credit,
3117 queue);
3118 }
3119 }
3120
3121 /**
3122 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3123 * @priv: driver private structure
3124 * Description: It is used for mapping RX queues to RX dma channels
3125 */
3126 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3127 {
3128 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3129 u32 queue;
3130 u32 chan;
3131
3132 for (queue = 0; queue < rx_queues_count; queue++) {
3133 chan = priv->plat->rx_queues_cfg[queue].chan;
3134 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3135 }
3136 }
3137
3138 /**
3139 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3140 * @priv: driver private structure
3141 * Description: It is used for configuring the RX Queue Priority
3142 */
3143 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3144 {
3145 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3146 u32 queue;
3147 u32 prio;
3148
3149 for (queue = 0; queue < rx_queues_count; queue++) {
3150 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3151 continue;
3152
3153 prio = priv->plat->rx_queues_cfg[queue].prio;
3154 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3155 }
3156 }
3157
3158 /**
3159 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3160 * @priv: driver private structure
3161 * Description: It is used for configuring the TX Queue Priority
3162 */
3163 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3164 {
3165 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3166 u32 queue;
3167 u32 prio;
3168
3169 for (queue = 0; queue < tx_queues_count; queue++) {
3170 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3171 continue;
3172
3173 prio = priv->plat->tx_queues_cfg[queue].prio;
3174 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3175 }
3176 }
3177
3178 /**
3179 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3180 * @priv: driver private structure
3181 * Description: It is used for configuring the RX queue routing
3182 */
3183 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3184 {
3185 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3186 u32 queue;
3187 u8 packet;
3188
3189 for (queue = 0; queue < rx_queues_count; queue++) {
3190 /* no specific packet type routing specified for the queue */
3191 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3192 continue;
3193
3194 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3195 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3196 }
3197 }
3198
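/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: disables RSS when the hardware or platform does not support
 * it; otherwise enables RSS based on NETIF_F_RXHASH and programs the RSS
 * configuration for the RX queues in use.
 */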
3199 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3200 {
3201 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3202 priv->rss.enable = false;
3203 return;
3204 }
3205
3206 if (priv->dev->features & NETIF_F_RXHASH)
3207 priv->rss.enable = true;
3208 else
3209 priv->rss.enable = false;
3210
3211 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3212 priv->plat->rx_queues_to_use);
3213 }
3214
3215 /**
3216 * stmmac_mtl_configuration - Configure MTL
3217 * @priv: driver private structure
3218 * Description: It is used for configuring the MTL
3219 */
3220 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3221 {
3222 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3223 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3224
3225 if (tx_queues_count > 1)
3226 stmmac_set_tx_queue_weight(priv);
3227
3228 /* Configure MTL RX algorithms */
3229 if (rx_queues_count > 1)
3230 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3231 priv->plat->rx_sched_algorithm);
3232
3233 /* Configure MTL TX algorithms */
3234 if (tx_queues_count > 1)
3235 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3236 priv->plat->tx_sched_algorithm);
3237
3238 /* Configure CBS in AVB TX queues */
3239 if (tx_queues_count > 1)
3240 stmmac_configure_cbs(priv);
3241
3242 /* Map RX MTL to DMA channels */
3243 stmmac_rx_queue_dma_chan_map(priv);
3244
3245 /* Enable MAC RX Queues */
3246 stmmac_mac_enable_rx_queues(priv);
3247
3248 /* Set RX priorities */
3249 if (rx_queues_count > 1)
3250 stmmac_mac_config_rx_queues_prio(priv);
3251
3252 /* Set TX priorities */
3253 if (tx_queues_count > 1)
3254 stmmac_mac_config_tx_queues_prio(priv);
3255
3256 /* Set RX routing */
3257 if (rx_queues_count > 1)
3258 stmmac_mac_config_rx_queues_routing(priv);
3259
3260 /* Receive Side Scaling */
3261 if (rx_queues_count > 1)
3262 stmmac_mac_config_rss(priv);
3263 }
3264
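/**
 * stmmac_safety_feat_configuration - configure HW safety features
 * @priv: driver private structure
 * Description: enables the HW safety features advertised by the capability
 * register (dma_cap.asp), using the platform-provided safety configuration;
 * otherwise it logs that none are supported.
 */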
3265 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3266 {
3267 if (priv->dma_cap.asp) {
3268 netdev_info(priv->dev, "Enabling Safety Features\n");
3269 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3270 priv->plat->safety_feat_cfg);
3271 } else {
3272 netdev_info(priv->dev, "No Safety Features support found\n");
3273 }
3274 }
3275
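/**
 * stmmac_fpe_start_wq - create the Frame Preemption (FPE) workqueue
 * @priv: driver private structure
 * Description: clears the FPE task state bits and creates the
 * single-threaded workqueue used for the FPE handshake. Returns 0 on
 * success or -ENOMEM if the workqueue cannot be created.
 */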
3276 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3277 {
3278 char *name;
3279
3280 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3281 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3282
3283 name = priv->wq_name;
3284 sprintf(name, "%s-fpe", priv->dev->name);
3285
3286 priv->fpe_wq = create_singlethread_workqueue(name);
3287 if (!priv->fpe_wq) {
3288 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3289
3290 return -ENOMEM;
3291 }
3292 netdev_info(priv->dev, "FPE workqueue start");
3293
3294 return 0;
3295 }
3296
3297 /**
3298 * stmmac_hw_setup - setup mac in a usable state.
3299 * @dev : pointer to the device structure.
3300 * @ptp_register: register PTP if set
3301 * Description:
3302 * this is the main function to set up the HW in a usable state: the
3303 * dma engine is reset, the core registers are configured (e.g. AXI,
3304 * checksum features, timers) and the DMA is ready to start receiving
3305 * and transmitting.
3306 * Return value:
3307 * 0 on success and an appropriate negative error code as defined in
3308 * errno.h on failure.
3309 */
3310 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3311 {
3312 struct stmmac_priv *priv = netdev_priv(dev);
3313 u32 rx_cnt = priv->plat->rx_queues_to_use;
3314 u32 tx_cnt = priv->plat->tx_queues_to_use;
3315 bool sph_en;
3316 u32 chan;
3317 int ret;
3318
3319 /* DMA initialization and SW reset */
3320 ret = stmmac_init_dma_engine(priv);
3321 if (ret < 0) {
3322 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3323 __func__);
3324 return ret;
3325 }
3326
3327 /* Copy the MAC addr into the HW */
3328 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3329
3330 /* PS and related bits will be programmed according to the speed */
3331 if (priv->hw->pcs) {
3332 int speed = priv->plat->mac_port_sel_speed;
3333
3334 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3335 (speed == SPEED_1000)) {
3336 priv->hw->ps = speed;
3337 } else {
3338 dev_warn(priv->device, "invalid port speed\n");
3339 priv->hw->ps = 0;
3340 }
3341 }
3342
3343 /* Initialize the MAC Core */
3344 stmmac_core_init(priv, priv->hw, dev);
3345
3346 /* Initialize MTL */
3347 stmmac_mtl_configuration(priv);
3348
3349 /* Initialize Safety Features */
3350 stmmac_safety_feat_configuration(priv);
3351
3352 ret = stmmac_rx_ipc(priv, priv->hw);
3353 if (!ret) {
3354 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3355 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3356 priv->hw->rx_csum = 0;
3357 }
3358
3359 /* Enable the MAC Rx/Tx */
3360 stmmac_mac_set(priv, priv->ioaddr, true);
3361
3362 /* Set the HW DMA mode and the COE */
3363 stmmac_dma_operation_mode(priv);
3364
3365 stmmac_mmc_setup(priv);
3366
3367 if (ptp_register) {
3368 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3369 if (ret < 0)
3370 netdev_warn(priv->dev,
3371 "failed to enable PTP reference clock: %pe\n",
3372 ERR_PTR(ret));
3373 }
3374
3375 ret = stmmac_init_ptp(priv);
3376 if (ret == -EOPNOTSUPP)
3377 netdev_info(priv->dev, "PTP not supported by HW\n");
3378 else if (ret)
3379 netdev_warn(priv->dev, "PTP init failed\n");
3380 else if (ptp_register)
3381 stmmac_ptp_register(priv);
3382
3383 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3384
3385 /* Convert the timer from msec to usec */
3386 if (!priv->tx_lpi_timer)
3387 priv->tx_lpi_timer = eee_timer * 1000;
3388
3389 if (priv->use_riwt) {
3390 u32 queue;
3391
3392 for (queue = 0; queue < rx_cnt; queue++) {
3393 if (!priv->rx_riwt[queue])
3394 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3395
3396 stmmac_rx_watchdog(priv, priv->ioaddr,
3397 priv->rx_riwt[queue], queue);
3398 }
3399 }
3400
3401 if (priv->hw->pcs)
3402 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3403
3404 /* set TX and RX rings length */
3405 stmmac_set_rings_length(priv);
3406
3407 /* Enable TSO */
3408 if (priv->tso) {
3409 for (chan = 0; chan < tx_cnt; chan++) {
3410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3411
3412 /* TSO and TBS cannot co-exist */
3413 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3414 continue;
3415
3416 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3417 }
3418 }
3419
3420 /* Enable Split Header */
3421 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3422 for (chan = 0; chan < rx_cnt; chan++)
3423 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3424
3426 /* VLAN Tag Insertion */
3427 if (priv->dma_cap.vlins)
3428 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3429
3430 /* TBS */
3431 for (chan = 0; chan < tx_cnt; chan++) {
3432 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3433 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3434
3435 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3436 }
3437
3438 /* Configure real RX and TX queues */
3439 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3440 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3441
3442 /* Start the ball rolling... */
3443 stmmac_start_all_dma(priv);
3444
3445 if (priv->dma_cap.fpesel) {
3446 stmmac_fpe_start_wq(priv);
3447
3448 if (priv->plat->fpe_cfg->enable)
3449 stmmac_fpe_handshake(priv, true);
3450 }
3451
3452 return 0;
3453 }
3454
3455 static void stmmac_hw_teardown(struct net_device *dev)
3456 {
3457 struct stmmac_priv *priv = netdev_priv(dev);
3458
3459 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3460 }
3461
3462 static void stmmac_free_irq(struct net_device *dev,
3463 enum request_irq_err irq_err, int irq_idx)
3464 {
3465 struct stmmac_priv *priv = netdev_priv(dev);
3466 int j;
3467
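/* The cases below intentionally fall through: teardown starts at the last
 * successfully requested IRQ class and cascades down to the common MAC IRQ,
 * so a partial request failure frees exactly what was requested before it.
 */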
3468 switch (irq_err) {
3469 case REQ_IRQ_ERR_ALL:
3470 irq_idx = priv->plat->tx_queues_to_use;
3471 fallthrough;
3472 case REQ_IRQ_ERR_TX:
3473 for (j = irq_idx - 1; j >= 0; j--) {
3474 if (priv->tx_irq[j] > 0) {
3475 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3476 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3477 }
3478 }
3479 irq_idx = priv->plat->rx_queues_to_use;
3480 fallthrough;
3481 case REQ_IRQ_ERR_RX:
3482 for (j = irq_idx - 1; j >= 0; j--) {
3483 if (priv->rx_irq[j] > 0) {
3484 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3485 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3486 }
3487 }
3488
3489 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3490 free_irq(priv->sfty_ue_irq, dev);
3491 fallthrough;
3492 case REQ_IRQ_ERR_SFTY_UE:
3493 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3494 free_irq(priv->sfty_ce_irq, dev);
3495 fallthrough;
3496 case REQ_IRQ_ERR_SFTY_CE:
3497 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3498 free_irq(priv->lpi_irq, dev);
3499 fallthrough;
3500 case REQ_IRQ_ERR_LPI:
3501 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3502 free_irq(priv->wol_irq, dev);
3503 fallthrough;
3504 case REQ_IRQ_ERR_WOL:
3505 free_irq(dev->irq, dev);
3506 fallthrough;
3507 case REQ_IRQ_ERR_MAC:
3508 case REQ_IRQ_ERR_NO:
3509 /* If the MAC IRQ request failed, there are no more IRQs to free */
3510 break;
3511 }
3512 }
3513
3514 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3515 {
3516 struct stmmac_priv *priv = netdev_priv(dev);
3517 enum request_irq_err irq_err;
3518 int irq_idx = 0;
3519 char *int_name;
3520 int ret;
3521 int i;
3522
3523 /* For common interrupt */
3524 int_name = priv->int_name_mac;
3525 sprintf(int_name, "%s:%s", dev->name, "mac");
3526 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3527 0, int_name, dev);
3528 if (unlikely(ret < 0)) {
3529 netdev_err(priv->dev,
3530 "%s: alloc mac MSI %d (error: %d)\n",
3531 __func__, dev->irq, ret);
3532 irq_err = REQ_IRQ_ERR_MAC;
3533 goto irq_error;
3534 }
3535
3536 /* Request the Wake IRQ in case a separate line
3537 * is used for WoL
3538 */
3539 priv->wol_irq_disabled = true;
3540 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3541 int_name = priv->int_name_wol;
3542 sprintf(int_name, "%s:%s", dev->name, "wol");
3543 ret = request_irq(priv->wol_irq,
3544 stmmac_mac_interrupt,
3545 0, int_name, dev);
3546 if (unlikely(ret < 0)) {
3547 netdev_err(priv->dev,
3548 "%s: alloc wol MSI %d (error: %d)\n",
3549 __func__, priv->wol_irq, ret);
3550 irq_err = REQ_IRQ_ERR_WOL;
3551 goto irq_error;
3552 }
3553 }
3554
3555 /* Request the LPI IRQ in case a separate line
3556 * is used for LPI
3557 */
3558 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3559 int_name = priv->int_name_lpi;
3560 sprintf(int_name, "%s:%s", dev->name, "lpi");
3561 ret = request_irq(priv->lpi_irq,
3562 stmmac_mac_interrupt,
3563 0, int_name, dev);
3564 if (unlikely(ret < 0)) {
3565 netdev_err(priv->dev,
3566 "%s: alloc lpi MSI %d (error: %d)\n",
3567 __func__, priv->lpi_irq, ret);
3568 irq_err = REQ_IRQ_ERR_LPI;
3569 goto irq_error;
3570 }
3571 }
3572
3573 /* Request the Safety Feature Correctable Error line in
3574 * case a separate line is used
3575 */
3576 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3577 int_name = priv->int_name_sfty_ce;
3578 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3579 ret = request_irq(priv->sfty_ce_irq,
3580 stmmac_safety_interrupt,
3581 0, int_name, dev);
3582 if (unlikely(ret < 0)) {
3583 netdev_err(priv->dev,
3584 "%s: alloc sfty ce MSI %d (error: %d)\n",
3585 __func__, priv->sfty_ce_irq, ret);
3586 irq_err = REQ_IRQ_ERR_SFTY_CE;
3587 goto irq_error;
3588 }
3589 }
3590
3591 /* Request the Safety Feature Uncorrectable Error line in
3592 * case a separate line is used
3593 */
3594 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3595 int_name = priv->int_name_sfty_ue;
3596 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3597 ret = request_irq(priv->sfty_ue_irq,
3598 stmmac_safety_interrupt,
3599 0, int_name, dev);
3600 if (unlikely(ret < 0)) {
3601 netdev_err(priv->dev,
3602 "%s: alloc sfty ue MSI %d (error: %d)\n",
3603 __func__, priv->sfty_ue_irq, ret);
3604 irq_err = REQ_IRQ_ERR_SFTY_UE;
3605 goto irq_error;
3606 }
3607 }
3608
3609 /* Request Rx MSI irq */
3610 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3611 if (i >= MTL_MAX_RX_QUEUES)
3612 break;
3613 if (priv->rx_irq[i] == 0)
3614 continue;
3615
3616 int_name = priv->int_name_rx_irq[i];
3617 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3618 ret = request_irq(priv->rx_irq[i],
3619 stmmac_msi_intr_rx,
3620 0, int_name, &priv->dma_conf.rx_queue[i]);
3621 if (unlikely(ret < 0)) {
3622 netdev_err(priv->dev,
3623 "%s: alloc rx-%d MSI %d (error: %d)\n",
3624 __func__, i, priv->rx_irq[i], ret);
3625 irq_err = REQ_IRQ_ERR_RX;
3626 irq_idx = i;
3627 goto irq_error;
3628 }
3629 irq_set_affinity_hint(priv->rx_irq[i],
3630 cpumask_of(i % num_online_cpus()));
3631 }
3632
3633 /* Request Tx MSI irq */
3634 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3635 if (i >= MTL_MAX_TX_QUEUES)
3636 break;
3637 if (priv->tx_irq[i] == 0)
3638 continue;
3639
3640 int_name = priv->int_name_tx_irq[i];
3641 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3642 ret = request_irq(priv->tx_irq[i],
3643 stmmac_msi_intr_tx,
3644 0, int_name, &priv->dma_conf.tx_queue[i]);
3645 if (unlikely(ret < 0)) {
3646 netdev_err(priv->dev,
3647 "%s: alloc tx-%d MSI %d (error: %d)\n",
3648 __func__, i, priv->tx_irq[i], ret);
3649 irq_err = REQ_IRQ_ERR_TX;
3650 irq_idx = i;
3651 goto irq_error;
3652 }
3653 irq_set_affinity_hint(priv->tx_irq[i],
3654 cpumask_of(i % num_online_cpus()));
3655 }
3656
3657 return 0;
3658
3659 irq_error:
3660 stmmac_free_irq(dev, irq_err, irq_idx);
3661 return ret;
3662 }
3663
3664 static int stmmac_request_irq_single(struct net_device *dev)
3665 {
3666 struct stmmac_priv *priv = netdev_priv(dev);
3667 enum request_irq_err irq_err;
3668 int ret;
3669
3670 ret = request_irq(dev->irq, stmmac_interrupt,
3671 IRQF_SHARED, dev->name, dev);
3672 if (unlikely(ret < 0)) {
3673 netdev_err(priv->dev,
3674 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3675 __func__, dev->irq, ret);
3676 irq_err = REQ_IRQ_ERR_MAC;
3677 goto irq_error;
3678 }
3679
3680 /* Request the Wake IRQ in case a separate line
3681 * is used for WoL
3682 */
3683 priv->wol_irq_disabled = true;
3684 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3685 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3686 IRQF_SHARED, dev->name, dev);
3687 if (unlikely(ret < 0)) {
3688 netdev_err(priv->dev,
3689 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3690 __func__, priv->wol_irq, ret);
3691 irq_err = REQ_IRQ_ERR_WOL;
3692 goto irq_error;
3693 }
3694 }
3695
3696 /* Request the IRQ lines */
3697 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3698 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3699 IRQF_SHARED, dev->name, dev);
3700 if (unlikely(ret < 0)) {
3701 netdev_err(priv->dev,
3702 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3703 __func__, priv->lpi_irq, ret);
3704 irq_err = REQ_IRQ_ERR_LPI;
3705 goto irq_error;
3706 }
3707 }
3708
3709 return 0;
3710
3711 irq_error:
3712 stmmac_free_irq(dev, irq_err, 0);
3713 return ret;
3714 }
3715
3716 static int stmmac_request_irq(struct net_device *dev)
3717 {
3718 struct stmmac_priv *priv = netdev_priv(dev);
3719 int ret;
3720
3721 /* Request the IRQ lines */
3722 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3723 ret = stmmac_request_irq_multi_msi(dev);
3724 else
3725 ret = stmmac_request_irq_single(dev);
3726
3727 return ret;
3728 }
3729
3730 /**
3731 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3732 * @priv: driver private structure
3733 * @mtu: MTU to setup the dma queue and buf with
3734 * Description: Allocate and generate a dma_conf based on the provided MTU.
3735 * Allocate the Tx/Rx DMA queues and initialize them.
3736 * Return value:
3737 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3738 */
3739 static struct stmmac_dma_conf *
3740 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3741 {
3742 struct stmmac_dma_conf *dma_conf;
3743 int chan, bfsize, ret;
3744
3745 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3746 if (!dma_conf) {
3747 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3748 __func__);
3749 return ERR_PTR(-ENOMEM);
3750 }
3751
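/* Pick the DMA buffer size: use the 16KiB buffer when the core supports it
 * for this MTU, otherwise derive a smaller size from the MTU itself.
 */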
3752 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3753 if (bfsize < 0)
3754 bfsize = 0;
3755
3756 if (bfsize < BUF_SIZE_16KiB)
3757 bfsize = stmmac_set_bfsize(mtu, 0);
3758
3759 dma_conf->dma_buf_sz = bfsize;
3760 /* Choose the Tx/Rx ring sizes from the ones already defined
3761 * in the priv struct, if set.
3762 */
3763 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3764 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3765
3766 if (!dma_conf->dma_tx_size)
3767 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3768 if (!dma_conf->dma_rx_size)
3769 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3770
3771 /* Earlier check for TBS */
3772 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3773 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3774 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3775
3776 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3777 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3778 }
3779
3780 ret = alloc_dma_desc_resources(priv, dma_conf);
3781 if (ret < 0) {
3782 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3783 __func__);
3784 goto alloc_error;
3785 }
3786
3787 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3788 if (ret < 0) {
3789 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3790 __func__);
3791 goto init_error;
3792 }
3793
3794 return dma_conf;
3795
3796 init_error:
3797 free_dma_desc_resources(priv, dma_conf);
3798 alloc_error:
3799 kfree(dma_conf);
3800 return ERR_PTR(ret);
3801 }
3802
3803 /**
3804 * __stmmac_open - open entry point of the driver
3805 * @dev : pointer to the device structure.
3806 * @dma_conf : the DMA configuration to install for this open
3807 * Description:
3808 * This function is the open entry point of the driver.
3809 * Return value:
3810 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3811 * file on failure.
3812 */
3813 static int __stmmac_open(struct net_device *dev,
3814 struct stmmac_dma_conf *dma_conf)
3815 {
3816 struct stmmac_priv *priv = netdev_priv(dev);
3817 int mode = priv->plat->phy_interface;
3818 u32 chan;
3819 int ret;
3820
3821 ret = pm_runtime_resume_and_get(priv->device);
3822 if (ret < 0)
3823 return ret;
3824
3825 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3826 priv->hw->pcs != STMMAC_PCS_RTBI &&
3827 (!priv->hw->xpcs ||
3828 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3829 !priv->hw->lynx_pcs) {
3830 ret = stmmac_init_phy(dev);
3831 if (ret) {
3832 netdev_err(priv->dev,
3833 "%s: Cannot attach to PHY (error: %d)\n",
3834 __func__, ret);
3835 goto init_phy_error;
3836 }
3837 }
3838
3839 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3840
3841 buf_sz = dma_conf->dma_buf_sz;
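/* Carry the per-queue TBS enable flag over from the current configuration
 * so that installing the new dma_conf does not silently drop it.
 */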
3842 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3843 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3844 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3845 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3846
3847 stmmac_reset_queues_param(priv);
3848
3849 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3850 priv->plat->serdes_powerup) {
3851 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3852 if (ret < 0) {
3853 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3854 __func__);
3855 goto init_error;
3856 }
3857 }
3858
3859 ret = stmmac_hw_setup(dev, true);
3860 if (ret < 0) {
3861 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3862 goto init_error;
3863 }
3864
3865 stmmac_init_coalesce(priv);
3866
3867 phylink_start(priv->phylink);
3868 /* We may have called phylink_speed_down before */
3869 phylink_speed_up(priv->phylink);
3870
3871 ret = stmmac_request_irq(dev);
3872 if (ret)
3873 goto irq_error;
3874
3875 stmmac_enable_all_queues(priv);
3876 netif_tx_start_all_queues(priv->dev);
3877 stmmac_enable_all_dma_irq(priv);
3878
3879 return 0;
3880
3881 irq_error:
3882 phylink_stop(priv->phylink);
3883
3884 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3885 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3886
3887 stmmac_hw_teardown(dev);
3888 init_error:
3889 phylink_disconnect_phy(priv->phylink);
3890 init_phy_error:
3891 pm_runtime_put(priv->device);
3892 return ret;
3893 }
3894
3895 static int stmmac_open(struct net_device *dev)
3896 {
3897 struct stmmac_priv *priv = netdev_priv(dev);
3898 struct stmmac_dma_conf *dma_conf;
3899 int ret;
3900
3901 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3902 if (IS_ERR(dma_conf))
3903 return PTR_ERR(dma_conf);
3904
3905 ret = __stmmac_open(dev, dma_conf);
3906 if (ret)
3907 free_dma_desc_resources(priv, dma_conf);
3908
3909 kfree(dma_conf);
3910 return ret;
3911 }
3912
3913 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3914 {
3915 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3916
3917 if (priv->fpe_wq) {
3918 destroy_workqueue(priv->fpe_wq);
3919 priv->fpe_wq = NULL;
3920 }
3921
3922 netdev_info(priv->dev, "FPE workqueue stop");
3923 }
3924
3925 /**
3926 * stmmac_release - close entry point of the driver
3927 * @dev : device pointer.
3928 * Description:
3929 * This is the stop entry point of the driver.
3930 */
3931 static int stmmac_release(struct net_device *dev)
3932 {
3933 struct stmmac_priv *priv = netdev_priv(dev);
3934 u32 chan;
3935
3936 if (device_may_wakeup(priv->device))
3937 phylink_speed_down(priv->phylink, false);
3938 /* Stop and disconnect the PHY */
3939 phylink_stop(priv->phylink);
3940 phylink_disconnect_phy(priv->phylink);
3941
3942 stmmac_disable_all_queues(priv);
3943
3944 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3945 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3946
3947 netif_tx_disable(dev);
3948
3949 /* Free the IRQ lines */
3950 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3951
3952 if (priv->eee_enabled) {
3953 priv->tx_path_in_lpi_mode = false;
3954 del_timer_sync(&priv->eee_ctrl_timer);
3955 }
3956
3957 /* Stop TX/RX DMA and clear the descriptors */
3958 stmmac_stop_all_dma(priv);
3959
3960 /* Release and free the Rx/Tx resources */
3961 free_dma_desc_resources(priv, &priv->dma_conf);
3962
3963 /* Disable the MAC Rx/Tx */
3964 stmmac_mac_set(priv, priv->ioaddr, false);
3965
3966 /* Power down the SerDes, if present */
3967 if (priv->plat->serdes_powerdown)
3968 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3969
3970 netif_carrier_off(dev);
3971
3972 stmmac_release_ptp(priv);
3973
3974 pm_runtime_put(priv->device);
3975
3976 if (priv->dma_cap.fpesel)
3977 stmmac_fpe_stop_wq(priv);
3978
3979 return 0;
3980 }
3981
3982 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3983 struct stmmac_tx_queue *tx_q)
3984 {
3985 u16 tag = 0x0, inner_tag = 0x0;
3986 u32 inner_type = 0x0;
3987 struct dma_desc *p;
3988
3989 if (!priv->dma_cap.vlins)
3990 return false;
3991 if (!skb_vlan_tag_present(skb))
3992 return false;
3993 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3994 inner_tag = skb_vlan_tag_get(skb);
3995 inner_type = STMMAC_VLAN_INSERT;
3996 }
3997
3998 tag = skb_vlan_tag_get(skb);
3999
4000 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4001 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4002 else
4003 p = &tx_q->dma_tx[tx_q->cur_tx];
4004
4005 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4006 return false;
4007
4008 stmmac_set_tx_owner(priv, p);
4009 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4010 return true;
4011 }
4012
4013 /**
4014 * stmmac_tso_allocator - allocate and fill TSO payload descriptors
4015 * @priv: driver private structure
4016 * @des: buffer start address
4017 * @total_len: total length to fill in descriptors
4018 * @last_segment: condition for the last descriptor
4019 * @queue: TX queue index
4020 * Description:
4021 * This function fills as many descriptors as needed to cover the given
4022 * buffer length, each one carrying at most TSO_MAX_BUFF_SIZE bytes.
4023 */
4024 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4025 int total_len, bool last_segment, u32 queue)
4026 {
4027 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4028 struct dma_desc *desc;
4029 u32 buff_size;
4030 int tmp_len;
4031
4032 tmp_len = total_len;
4033
4034 while (tmp_len > 0) {
4035 dma_addr_t curr_addr;
4036
4037 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4038 priv->dma_conf.dma_tx_size);
4039 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4040
4041 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4042 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4043 else
4044 desc = &tx_q->dma_tx[tx_q->cur_tx];
4045
4046 curr_addr = des + (total_len - tmp_len);
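/* Cores limited to 32-bit DMA addressing take the buffer address directly
 * in DES0; otherwise the generic helper programs the full address.
 */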
4047 if (priv->dma_cap.addr64 <= 32)
4048 desc->des0 = cpu_to_le32(curr_addr);
4049 else
4050 stmmac_set_desc_addr(priv, desc, curr_addr);
4051
4052 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4053 TSO_MAX_BUFF_SIZE : tmp_len;
4054
4055 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4056 0, 1,
4057 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4058 0, 0);
4059
4060 tmp_len -= TSO_MAX_BUFF_SIZE;
4061 }
4062 }
4063
4064 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4065 {
4066 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4067 int desc_size;
4068
4069 if (likely(priv->extend_desc))
4070 desc_size = sizeof(struct dma_extended_desc);
4071 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4072 desc_size = sizeof(struct dma_edesc);
4073 else
4074 desc_size = sizeof(struct dma_desc);
4075
4076 /* The own bit must be the last thing written when preparing the
4077 * descriptor, and a barrier is needed to make sure everything is
4078 * coherent before handing control to the DMA engine.
4079 */
4080 wmb();
4081
4082 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4083 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4084 }
4085
4086 /**
4087 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4088 * @skb : the socket buffer
4089 * @dev : device pointer
4090 * Description: this is the transmit function that is called on TSO frames
4091 * (support available on GMAC4 and newer chips).
4092 * The diagram below shows the ring programming in the case of TSO frames:
4093 *
4094 * First Descriptor
4095 * --------
4096 * | DES0 |---> buffer1 = L2/L3/L4 header
4097 * | DES1 |---> TCP Payload (can continue on next descr...)
4098 * | DES2 |---> buffer 1 and 2 len
4099 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4100 * --------
4101 * |
4102 * ...
4103 * |
4104 * --------
4105 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4106 * | DES1 | --|
4107 * | DES2 | --> buffer 1 and 2 len
4108 * | DES3 |
4109 * --------
4110 *
4111 * The MSS is fixed while TSO is in use, so the TDES3 context field only needs reprogramming when the MSS value changes.
4112 */
4113 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4114 {
4115 struct dma_desc *desc, *first, *mss_desc = NULL;
4116 struct stmmac_priv *priv = netdev_priv(dev);
4117 int nfrags = skb_shinfo(skb)->nr_frags;
4118 u32 queue = skb_get_queue_mapping(skb);
4119 unsigned int first_entry, tx_packets;
4120 struct stmmac_txq_stats *txq_stats;
4121 int tmp_pay_len = 0, first_tx;
4122 struct stmmac_tx_queue *tx_q;
4123 bool has_vlan, set_ic;
4124 dma_addr_t tso_des, des;
4125 u8 proto_hdr_len, hdr;
4126 u32 pay_len, mss;
4127 int i;
4128
4129 tx_q = &priv->dma_conf.tx_queue[queue];
4130 txq_stats = &priv->xstats.txq_stats[queue];
4131 first_tx = tx_q->cur_tx;
4132
4133 /* Compute header lengths */
4134 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4135 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4136 hdr = sizeof(struct udphdr);
4137 } else {
4138 proto_hdr_len = skb_tcp_all_headers(skb);
4139 hdr = tcp_hdrlen(skb);
4140 }
4141
4142 /* Checking descriptor availability against this threshold should be safe enough */
4143 if (unlikely(stmmac_tx_avail(priv, queue) <
4144 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4145 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4146 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4147 queue));
4148 /* This is a hard error, log it. */
4149 netdev_err(priv->dev,
4150 "%s: Tx Ring full when queue awake\n",
4151 __func__);
4152 }
4153 return NETDEV_TX_BUSY;
4154 }
4155
4156 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4157
4158 mss = skb_shinfo(skb)->gso_size;
4159
4160 /* set new MSS value if needed */
4161 if (mss != tx_q->mss) {
4162 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4163 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4164 else
4165 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4166
4167 stmmac_set_mss(priv, mss_desc, mss);
4168 tx_q->mss = mss;
4169 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4170 priv->dma_conf.dma_tx_size);
4171 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4172 }
4173
4174 if (netif_msg_tx_queued(priv)) {
4175 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4176 __func__, hdr, proto_hdr_len, pay_len, mss);
4177 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4178 skb->data_len);
4179 }
4180
4181 /* Check if VLAN can be inserted by HW */
4182 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4183
4184 first_entry = tx_q->cur_tx;
4185 WARN_ON(tx_q->tx_skbuff[first_entry]);
4186
4187 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4188 desc = &tx_q->dma_entx[first_entry].basic;
4189 else
4190 desc = &tx_q->dma_tx[first_entry];
4191 first = desc;
4192
4193 if (has_vlan)
4194 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4195
4196 /* first descriptor: fill Headers on Buf1 */
4197 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4198 DMA_TO_DEVICE);
4199 if (dma_mapping_error(priv->device, des))
4200 goto dma_map_err;
4201
4202 if (priv->dma_cap.addr64 <= 32) {
4203 first->des0 = cpu_to_le32(des);
4204
4205 /* Fill start of payload in buff2 of first descriptor */
4206 if (pay_len)
4207 first->des1 = cpu_to_le32(des + proto_hdr_len);
4208
4209 /* If needed take extra descriptors to fill the remaining payload */
4210 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4211 tso_des = des;
4212 } else {
4213 stmmac_set_desc_addr(priv, first, des);
4214 tmp_pay_len = pay_len;
4215 tso_des = des + proto_hdr_len;
4216 pay_len = 0;
4217 }
4218
4219 stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4220
4221 /* In case two or more DMA transmit descriptors are allocated for this
4222 * non-paged SKB data, the DMA buffer address should be saved to
4223 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4224 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4225 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4226 * since the tail areas of the DMA buffer can be accessed by DMA engine
4227 * sooner or later.
4228 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4229 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4230 * this DMA buffer right after the DMA engine completely finishes the
4231 * full buffer transmission.
4232 */
4233 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4234 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4235 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4236 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4237
4238 /* Prepare fragments */
4239 for (i = 0; i < nfrags; i++) {
4240 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4241
4242 des = skb_frag_dma_map(priv->device, frag, 0,
4243 skb_frag_size(frag),
4244 DMA_TO_DEVICE);
4245 if (dma_mapping_error(priv->device, des))
4246 goto dma_map_err;
4247
4248 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4249 (i == nfrags - 1), queue);
4250
4251 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4252 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4253 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4254 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4255 }
4256
4257 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4258
4259 /* Only the last descriptor gets to point to the skb. */
4260 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4261 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4262
4263 /* Manage tx mitigation */
4264 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4265 tx_q->tx_count_frames += tx_packets;
4266
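/* Decide whether this descriptor should raise a Tx completion interrupt:
 * always when a HW timestamp is requested, never when the frame-count
 * threshold is zero, otherwise whenever the number of queued frames
 * crosses the coalescing threshold.
 */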
4267 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4268 set_ic = true;
4269 else if (!priv->tx_coal_frames[queue])
4270 set_ic = false;
4271 else if (tx_packets > priv->tx_coal_frames[queue])
4272 set_ic = true;
4273 else if ((tx_q->tx_count_frames %
4274 priv->tx_coal_frames[queue]) < tx_packets)
4275 set_ic = true;
4276 else
4277 set_ic = false;
4278
4279 if (set_ic) {
4280 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4281 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4282 else
4283 desc = &tx_q->dma_tx[tx_q->cur_tx];
4284
4285 tx_q->tx_count_frames = 0;
4286 stmmac_set_tx_ic(priv, desc);
4287 }
4288
4289 /* We've used all descriptors we need for this skb, however,
4290 * advance cur_tx so that it references a fresh descriptor.
4291 * ndo_start_xmit will fill this descriptor the next time it's
4292 * called and stmmac_tx_clean may clean up to this descriptor.
4293 */
4294 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4295
4296 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4297 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4298 __func__);
4299 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4300 }
4301
4302 u64_stats_update_begin(&txq_stats->q_syncp);
4303 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4304 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4305 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4306 if (set_ic)
4307 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4308 u64_stats_update_end(&txq_stats->q_syncp);
4309
4310 if (priv->sarc_type)
4311 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4312
4313 skb_tx_timestamp(skb);
4314
4315 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4316 priv->hwts_tx_en)) {
4317 /* declare that device is doing timestamping */
4318 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4319 stmmac_enable_tx_timestamp(priv, first);
4320 }
4321
4322 /* Complete the first descriptor before granting the DMA */
4323 stmmac_prepare_tso_tx_desc(priv, first, 1,
4324 proto_hdr_len,
4325 pay_len,
4326 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4327 hdr / 4, (skb->len - proto_hdr_len));
4328
4329 /* If context desc is used to change MSS */
4330 if (mss_desc) {
4331 /* Make sure that first descriptor has been completely
4332 * written, including its own bit. This is because MSS is
4333 * actually before first descriptor, so we need to make
4334 * sure that MSS's own bit is the last thing written.
4335 */
4336 dma_wmb();
4337 stmmac_set_tx_owner(priv, mss_desc);
4338 }
4339
4340 if (netif_msg_pktdata(priv)) {
4341 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4342 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4343 tx_q->cur_tx, first, nfrags);
4344 pr_info(">>> frame to be transmitted: ");
4345 print_pkt(skb->data, skb_headlen(skb));
4346 }
4347
4348 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4349
4350 stmmac_flush_tx_descriptors(priv, queue);
4351 stmmac_tx_timer_arm(priv, queue);
4352
4353 return NETDEV_TX_OK;
4354
4355 dma_map_err:
4356 dev_err(priv->device, "Tx dma map failed\n");
4357 dev_kfree_skb(skb);
4358 priv->xstats.tx_dropped++;
4359 return NETDEV_TX_OK;
4360 }
4361
4362 /**
4363 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4364 * @skb: socket buffer to check
4365 *
4366 * Check if a packet has an ethertype that will trigger the IP header checks
4367 * and IP/TCP checksum engine of the stmmac core.
4368 *
4369 * Return: true if the ethertype can trigger the checksum engine, false
4370 * otherwise
4371 */
4372 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4373 {
4374 int depth = 0;
4375 __be16 proto;
4376
4377 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4378 &depth);
4379
4380 return (depth <= ETH_HLEN) &&
4381 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4382 }
4383
4384 /**
4385 * stmmac_xmit - Tx entry point of the driver
4386 * @skb : the socket buffer
4387 * @dev : device pointer
4388 * Description : this is the tx entry point of the driver.
4389 * It programs the chain or the ring and supports oversized frames
4390 * and SG feature.
4391 */
4392 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4393 {
4394 unsigned int first_entry, tx_packets, enh_desc;
4395 struct stmmac_priv *priv = netdev_priv(dev);
4396 unsigned int nopaged_len = skb_headlen(skb);
4397 int i, csum_insertion = 0, is_jumbo = 0;
4398 u32 queue = skb_get_queue_mapping(skb);
4399 int nfrags = skb_shinfo(skb)->nr_frags;
4400 int gso = skb_shinfo(skb)->gso_type;
4401 struct stmmac_txq_stats *txq_stats;
4402 struct dma_edesc *tbs_desc = NULL;
4403 struct dma_desc *desc, *first;
4404 struct stmmac_tx_queue *tx_q;
4405 bool has_vlan, set_ic;
4406 int entry, first_tx;
4407 dma_addr_t des;
4408
4409 tx_q = &priv->dma_conf.tx_queue[queue];
4410 txq_stats = &priv->xstats.txq_stats[queue];
4411 first_tx = tx_q->cur_tx;
4412
4413 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4414 stmmac_disable_eee_mode(priv);
4415
4416 /* Manage oversized TCP frames for GMAC4 device */
4417 if (skb_is_gso(skb) && priv->tso) {
4418 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4419 return stmmac_tso_xmit(skb, dev);
4420 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4421 return stmmac_tso_xmit(skb, dev);
4422 }
4423
4424 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4425 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4426 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4427 queue));
4428 /* This is a hard error, log it. */
4429 netdev_err(priv->dev,
4430 "%s: Tx Ring full when queue awake\n",
4431 __func__);
4432 }
4433 return NETDEV_TX_BUSY;
4434 }
4435
4436 /* Check if VLAN can be inserted by HW */
4437 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4438
4439 entry = tx_q->cur_tx;
4440 first_entry = entry;
4441 WARN_ON(tx_q->tx_skbuff[first_entry]);
4442
4443 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4444 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4445 * queues. In that case, checksum offloading for those queues that don't
4446 * support tx coe needs to fall back to software checksum calculation.
4447 *
4448 * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4449 * also have to be checksummed in software.
4450 */
4451 if (csum_insertion &&
4452 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4453 !stmmac_has_ip_ethertype(skb))) {
4454 if (unlikely(skb_checksum_help(skb)))
4455 goto dma_map_err;
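/* The checksum has just been computed in software, so clear
 * csum_insertion and do not request the COE in the descriptor.
 */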
4456 csum_insertion = !csum_insertion;
4457 }
4458
4459 if (likely(priv->extend_desc))
4460 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4461 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4462 desc = &tx_q->dma_entx[entry].basic;
4463 else
4464 desc = tx_q->dma_tx + entry;
4465
4466 first = desc;
4467
4468 if (has_vlan)
4469 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4470
4471 enh_desc = priv->plat->enh_desc;
4472 /* To program the descriptors according to the size of the frame */
4473 if (enh_desc)
4474 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4475
4476 if (unlikely(is_jumbo)) {
4477 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4478 if (unlikely(entry < 0) && (entry != -EINVAL))
4479 goto dma_map_err;
4480 }
4481
4482 for (i = 0; i < nfrags; i++) {
4483 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4484 int len = skb_frag_size(frag);
4485 bool last_segment = (i == (nfrags - 1));
4486
4487 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4488 WARN_ON(tx_q->tx_skbuff[entry]);
4489
4490 if (likely(priv->extend_desc))
4491 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4492 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4493 desc = &tx_q->dma_entx[entry].basic;
4494 else
4495 desc = tx_q->dma_tx + entry;
4496
4497 des = skb_frag_dma_map(priv->device, frag, 0, len,
4498 DMA_TO_DEVICE);
4499 if (dma_mapping_error(priv->device, des))
4500 goto dma_map_err; /* should reuse desc w/o issues */
4501
4502 tx_q->tx_skbuff_dma[entry].buf = des;
4503
4504 stmmac_set_desc_addr(priv, desc, des);
4505
4506 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4507 tx_q->tx_skbuff_dma[entry].len = len;
4508 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4509 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4510
4511 /* Prepare the descriptor and set the own bit too */
4512 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4513 priv->mode, 1, last_segment, skb->len);
4514 }
4515
4516 /* Only the last descriptor gets to point to the skb. */
4517 tx_q->tx_skbuff[entry] = skb;
4518 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4519
4520 /* According to the coalesce parameter the IC bit for the latest
4521 * segment is reset and the timer re-started to clean the tx status.
4522 * This approach takes care of the fragments: desc is the first
4523 * element in case of no SG.
4524 */
4525 tx_packets = (entry + 1) - first_tx;
4526 tx_q->tx_count_frames += tx_packets;
4527
4528 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4529 set_ic = true;
4530 else if (!priv->tx_coal_frames[queue])
4531 set_ic = false;
4532 else if (tx_packets > priv->tx_coal_frames[queue])
4533 set_ic = true;
4534 else if ((tx_q->tx_count_frames %
4535 priv->tx_coal_frames[queue]) < tx_packets)
4536 set_ic = true;
4537 else
4538 set_ic = false;
4539
4540 if (set_ic) {
4541 if (likely(priv->extend_desc))
4542 desc = &tx_q->dma_etx[entry].basic;
4543 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4544 desc = &tx_q->dma_entx[entry].basic;
4545 else
4546 desc = &tx_q->dma_tx[entry];
4547
4548 tx_q->tx_count_frames = 0;
4549 stmmac_set_tx_ic(priv, desc);
4550 }
4551
4552 /* We've used all descriptors we need for this skb, however,
4553 * advance cur_tx so that it references a fresh descriptor.
4554 * ndo_start_xmit will fill this descriptor the next time it's
4555 * called and stmmac_tx_clean may clean up to this descriptor.
4556 */
4557 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4558 tx_q->cur_tx = entry;
4559
4560 if (netif_msg_pktdata(priv)) {
4561 netdev_dbg(priv->dev,
4562 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4563 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4564 entry, first, nfrags);
4565
4566 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4567 print_pkt(skb->data, skb->len);
4568 }
4569
4570 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4571 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4572 __func__);
4573 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4574 }
4575
4576 u64_stats_update_begin(&txq_stats->q_syncp);
4577 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4578 if (set_ic)
4579 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4580 u64_stats_update_end(&txq_stats->q_syncp);
4581
4582 if (priv->sarc_type)
4583 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4584
4585 skb_tx_timestamp(skb);
4586
4587 /* Ready to fill the first descriptor and set the OWN bit w/o any
4588 * problems because all the descriptors are actually ready to be
4589 * passed to the DMA engine.
4590 */
4591 if (likely(!is_jumbo)) {
4592 bool last_segment = (nfrags == 0);
4593
4594 des = dma_map_single(priv->device, skb->data,
4595 nopaged_len, DMA_TO_DEVICE);
4596 if (dma_mapping_error(priv->device, des))
4597 goto dma_map_err;
4598
4599 tx_q->tx_skbuff_dma[first_entry].buf = des;
4600 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4601 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4602
4603 stmmac_set_desc_addr(priv, first, des);
4604
4605 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4606 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4607
4608 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4609 priv->hwts_tx_en)) {
4610 /* declare that device is doing timestamping */
4611 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4612 stmmac_enable_tx_timestamp(priv, first);
4613 }
4614
4615 /* Prepare the first descriptor setting the OWN bit too */
4616 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4617 csum_insertion, priv->mode, 0, last_segment,
4618 skb->len);
4619 }
4620
4621 if (tx_q->tbs & STMMAC_TBS_EN) {
4622 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4623
4624 tbs_desc = &tx_q->dma_entx[first_entry];
4625 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4626 }
4627
4628 stmmac_set_tx_owner(priv, first);
4629
4630 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4631
4632 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4633
4634 stmmac_flush_tx_descriptors(priv, queue);
4635 stmmac_tx_timer_arm(priv, queue);
4636
4637 return NETDEV_TX_OK;
4638
4639 dma_map_err:
4640 netdev_err(priv->dev, "Tx DMA map failed\n");
4641 dev_kfree_skb(skb);
4642 priv->xstats.tx_dropped++;
4643 return NETDEV_TX_OK;
4644 }
4645
4646 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4647 {
4648 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4649 __be16 vlan_proto = veth->h_vlan_proto;
4650 u16 vlanid;
4651
4652 if ((vlan_proto == htons(ETH_P_8021Q) &&
4653 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4654 (vlan_proto == htons(ETH_P_8021AD) &&
4655 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4656 /* pop the vlan tag */
4657 vlanid = ntohs(veth->h_vlan_TCI);
4658 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4659 skb_pull(skb, VLAN_HLEN);
4660 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4661 }
4662 }
4663
4664 /**
4665 * stmmac_rx_refill - refill the used preallocated RX buffers
4666 * @priv: driver private structure
4667 * @queue: RX queue index
4668 * Description : this reallocates the RX buffers for the reception
4669 * process, which is based on zero-copy.
4670 */
4671 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4672 {
4673 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4674 int dirty = stmmac_rx_dirty(priv, queue);
4675 unsigned int entry = rx_q->dirty_rx;
4676 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4677
4678 if (priv->dma_cap.host_dma_width <= 32)
4679 gfp |= GFP_DMA32;
4680
4681 while (dirty-- > 0) {
4682 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4683 struct dma_desc *p;
4684 bool use_rx_wd;
4685
4686 if (priv->extend_desc)
4687 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4688 else
4689 p = rx_q->dma_rx + entry;
4690
4691 if (!buf->page) {
4692 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4693 if (!buf->page)
4694 break;
4695 }
4696
4697 if (priv->sph && !buf->sec_page) {
4698 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4699 if (!buf->sec_page)
4700 break;
4701
4702 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4703 }
4704
4705 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4706
4707 stmmac_set_desc_addr(priv, p, buf->addr);
4708 if (priv->sph)
4709 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4710 else
4711 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4712 stmmac_refill_desc3(priv, rx_q, p);
4713
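/* use_rx_wd selects watchdog-based (RIWT) interrupt mitigation for this
 * descriptor rather than an interrupt on every completion; it only
 * applies when RIWT is actually in use.
 */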
4714 rx_q->rx_count_frames++;
4715 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4716 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4717 rx_q->rx_count_frames = 0;
4718
4719 use_rx_wd = !priv->rx_coal_frames[queue];
4720 use_rx_wd |= rx_q->rx_count_frames > 0;
4721 if (!priv->use_riwt)
4722 use_rx_wd = false;
4723
4724 dma_wmb();
4725 stmmac_set_rx_owner(priv, p, use_rx_wd);
4726
4727 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4728 }
4729 rx_q->dirty_rx = entry;
4730 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4731 (rx_q->dirty_rx * sizeof(struct dma_desc));
4732 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4733 }
4734
4735 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4736 struct dma_desc *p,
4737 int status, unsigned int len)
4738 {
4739 unsigned int plen = 0, hlen = 0;
4740 int coe = priv->hw->rx_csum;
4741
4742 /* Not first descriptor, buffer is always zero */
4743 if (priv->sph && len)
4744 return 0;
4745
4746 /* First descriptor, get split header length */
4747 stmmac_get_rx_header_len(priv, p, &hlen);
4748 if (priv->sph && hlen) {
4749 priv->xstats.rx_split_hdr_pkt_n++;
4750 return hlen;
4751 }
4752
4753 /* First descriptor, not last descriptor and not split header */
4754 if (status & rx_not_ls)
4755 return priv->dma_conf.dma_buf_sz;
4756
4757 plen = stmmac_get_rx_frame_len(priv, p, coe);
4758
4759 /* First descriptor and last descriptor and not split header */
4760 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4761 }
4762
4763 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4764 struct dma_desc *p,
4765 int status, unsigned int len)
4766 {
4767 int coe = priv->hw->rx_csum;
4768 unsigned int plen = 0;
4769
4770 /* Not split header, buffer is not available */
4771 if (!priv->sph)
4772 return 0;
4773
4774 /* Not last descriptor */
4775 if (status & rx_not_ls)
4776 return priv->dma_conf.dma_buf_sz;
4777
4778 plen = stmmac_get_rx_frame_len(priv, p, coe);
4779
4780 /* Last descriptor */
4781 return plen - len;
4782 }
4783
4784 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4785 struct xdp_frame *xdpf, bool dma_map)
4786 {
4787 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4788 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4789 unsigned int entry = tx_q->cur_tx;
4790 struct dma_desc *tx_desc;
4791 dma_addr_t dma_addr;
4792 bool set_ic;
4793
4794 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4795 return STMMAC_XDP_CONSUMED;
4796
4797 if (likely(priv->extend_desc))
4798 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4799 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4800 tx_desc = &tx_q->dma_entx[entry].basic;
4801 else
4802 tx_desc = tx_q->dma_tx + entry;
4803
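/* Frames coming from ndo_xdp_xmit (dma_map == true) need a fresh DMA
 * mapping, while XDP_TX frames reuse the page_pool mapping and only
 * need a sync for the device.
 */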
4804 if (dma_map) {
4805 dma_addr = dma_map_single(priv->device, xdpf->data,
4806 xdpf->len, DMA_TO_DEVICE);
4807 if (dma_mapping_error(priv->device, dma_addr))
4808 return STMMAC_XDP_CONSUMED;
4809
4810 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4811 } else {
4812 struct page *page = virt_to_page(xdpf->data);
4813
4814 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4815 xdpf->headroom;
4816 dma_sync_single_for_device(priv->device, dma_addr,
4817 xdpf->len, DMA_BIDIRECTIONAL);
4818
4819 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4820 }
4821
4822 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4823 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4824 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4825 tx_q->tx_skbuff_dma[entry].last_segment = true;
4826 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4827
4828 tx_q->xdpf[entry] = xdpf;
4829
4830 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4831
4832 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4833 true, priv->mode, true, true,
4834 xdpf->len);
4835
4836 tx_q->tx_count_frames++;
4837
4838 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4839 set_ic = true;
4840 else
4841 set_ic = false;
4842
4843 if (set_ic) {
4844 tx_q->tx_count_frames = 0;
4845 stmmac_set_tx_ic(priv, tx_desc);
4846 u64_stats_update_begin(&txq_stats->q_syncp);
4847 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4848 u64_stats_update_end(&txq_stats->q_syncp);
4849 }
4850
4851 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4852
4853 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4854 tx_q->cur_tx = entry;
4855
4856 return STMMAC_XDP_TX;
4857 }
4858
4859 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4860 int cpu)
4861 {
4862 int index = cpu;
4863
4864 if (unlikely(index < 0))
4865 index = 0;
4866
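/* Fold the CPU id onto the available Tx queues (index % tx_queues_to_use)
 * so XDP transmissions from any CPU land on a valid queue.
 */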
4867 while (index >= priv->plat->tx_queues_to_use)
4868 index -= priv->plat->tx_queues_to_use;
4869
4870 return index;
4871 }
4872
4873 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4874 struct xdp_buff *xdp)
4875 {
4876 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4877 int cpu = smp_processor_id();
4878 struct netdev_queue *nq;
4879 int queue;
4880 int res;
4881
4882 if (unlikely(!xdpf))
4883 return STMMAC_XDP_CONSUMED;
4884
4885 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4886 nq = netdev_get_tx_queue(priv->dev, queue);
4887
4888 __netif_tx_lock(nq, cpu);
4889 /* Avoids TX time-out as we are sharing with slow path */
4890 txq_trans_cond_update(nq);
4891
4892 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4893 if (res == STMMAC_XDP_TX)
4894 stmmac_flush_tx_descriptors(priv, queue);
4895
4896 __netif_tx_unlock(nq);
4897
4898 return res;
4899 }
4900
4901 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4902 struct bpf_prog *prog,
4903 struct xdp_buff *xdp)
4904 {
4905 u32 act;
4906 int res;
4907
4908 act = bpf_prog_run_xdp(prog, xdp);
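/* Translate the XDP verdict into the driver's internal result flags so
 * the caller can batch tail-pointer updates and redirect flushes.
 */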
4909 switch (act) {
4910 case XDP_PASS:
4911 res = STMMAC_XDP_PASS;
4912 break;
4913 case XDP_TX:
4914 res = stmmac_xdp_xmit_back(priv, xdp);
4915 break;
4916 case XDP_REDIRECT:
4917 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4918 res = STMMAC_XDP_CONSUMED;
4919 else
4920 res = STMMAC_XDP_REDIRECT;
4921 break;
4922 default:
4923 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4924 fallthrough;
4925 case XDP_ABORTED:
4926 trace_xdp_exception(priv->dev, prog, act);
4927 fallthrough;
4928 case XDP_DROP:
4929 res = STMMAC_XDP_CONSUMED;
4930 break;
4931 }
4932
4933 return res;
4934 }
4935
4936 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4937 struct xdp_buff *xdp)
4938 {
4939 struct bpf_prog *prog;
4940 int res;
4941
4942 prog = READ_ONCE(priv->xdp_prog);
4943 if (!prog) {
4944 res = STMMAC_XDP_PASS;
4945 goto out;
4946 }
4947
4948 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4949 out:
4950 return ERR_PTR(-res);
4951 }
4952
4953 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4954 int xdp_status)
4955 {
4956 int cpu = smp_processor_id();
4957 int queue;
4958
4959 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4960
4961 if (xdp_status & STMMAC_XDP_TX)
4962 stmmac_tx_timer_arm(priv, queue);
4963
4964 if (xdp_status & STMMAC_XDP_REDIRECT)
4965 xdp_do_flush();
4966 }
4967
4968 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4969 struct xdp_buff *xdp)
4970 {
4971 unsigned int metasize = xdp->data - xdp->data_meta;
4972 unsigned int datasize = xdp->data_end - xdp->data;
4973 struct sk_buff *skb;
4974
4975 skb = __napi_alloc_skb(&ch->rxtx_napi,
4976 xdp->data_end - xdp->data_hard_start,
4977 GFP_ATOMIC | __GFP_NOWARN);
4978 if (unlikely(!skb))
4979 return NULL;
4980
4981 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4982 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4983 if (metasize)
4984 skb_metadata_set(skb, metasize);
4985
4986 return skb;
4987 }
4988
4989 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4990 struct dma_desc *p, struct dma_desc *np,
4991 struct xdp_buff *xdp)
4992 {
4993 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4994 struct stmmac_channel *ch = &priv->channel[queue];
4995 unsigned int len = xdp->data_end - xdp->data;
4996 enum pkt_hash_types hash_type;
4997 int coe = priv->hw->rx_csum;
4998 struct sk_buff *skb;
4999 u32 hash;
5000
5001 skb = stmmac_construct_skb_zc(ch, xdp);
5002 if (!skb) {
5003 priv->xstats.rx_dropped++;
5004 return;
5005 }
5006
5007 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5008 stmmac_rx_vlan(priv->dev, skb);
5009 skb->protocol = eth_type_trans(skb, priv->dev);
5010
5011 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5012 skb_checksum_none_assert(skb);
5013 else
5014 skb->ip_summed = CHECKSUM_UNNECESSARY;
5015
5016 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5017 skb_set_hash(skb, hash, hash_type);
5018
5019 skb_record_rx_queue(skb, queue);
5020 napi_gro_receive(&ch->rxtx_napi, skb);
5021
5022 u64_stats_update_begin(&rxq_stats->napi_syncp);
5023 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5024 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5025 u64_stats_update_end(&rxq_stats->napi_syncp);
5026 }
5027
5028 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5029 {
5030 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5031 unsigned int entry = rx_q->dirty_rx;
5032 struct dma_desc *rx_desc = NULL;
5033 bool ret = true;
5034
5035 budget = min(budget, stmmac_rx_dirty(priv, queue));
5036
5037 while (budget-- > 0 && entry != rx_q->cur_rx) {
5038 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5039 dma_addr_t dma_addr;
5040 bool use_rx_wd;
5041
5042 if (!buf->xdp) {
5043 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5044 if (!buf->xdp) {
5045 ret = false;
5046 break;
5047 }
5048 }
5049
5050 if (priv->extend_desc)
5051 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5052 else
5053 rx_desc = rx_q->dma_rx + entry;
5054
5055 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5056 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5057 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5058 stmmac_refill_desc3(priv, rx_q, rx_desc);
5059
5060 rx_q->rx_count_frames++;
5061 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5062 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5063 rx_q->rx_count_frames = 0;
5064
5065 use_rx_wd = !priv->rx_coal_frames[queue];
5066 use_rx_wd |= rx_q->rx_count_frames > 0;
5067 if (!priv->use_riwt)
5068 use_rx_wd = false;
5069
5070 dma_wmb();
5071 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5072
5073 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5074 }
5075
5076 if (rx_desc) {
5077 rx_q->dirty_rx = entry;
5078 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5079 (rx_q->dirty_rx * sizeof(struct dma_desc));
5080 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5081 }
5082
5083 return ret;
5084 }
5085
5086 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5087 {
5088 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5089 * to represent incoming packet, whereas cb field in the same structure
5090 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5091 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5092 */
5093 return (struct stmmac_xdp_buff *)xdp;
5094 }
5095
5096 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5097 {
5098 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5099 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5100 unsigned int count = 0, error = 0, len = 0;
5101 int dirty = stmmac_rx_dirty(priv, queue);
5102 unsigned int next_entry = rx_q->cur_rx;
5103 u32 rx_errors = 0, rx_dropped = 0;
5104 unsigned int desc_size;
5105 struct bpf_prog *prog;
5106 bool failure = false;
5107 int xdp_status = 0;
5108 int status = 0;
5109
5110 if (netif_msg_rx_status(priv)) {
5111 void *rx_head;
5112
5113 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5114 if (priv->extend_desc) {
5115 rx_head = (void *)rx_q->dma_erx;
5116 desc_size = sizeof(struct dma_extended_desc);
5117 } else {
5118 rx_head = (void *)rx_q->dma_rx;
5119 desc_size = sizeof(struct dma_desc);
5120 }
5121
5122 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5123 rx_q->dma_rx_phy, desc_size);
5124 }
5125 while (count < limit) {
5126 struct stmmac_rx_buffer *buf;
5127 struct stmmac_xdp_buff *ctx;
5128 unsigned int buf1_len = 0;
5129 struct dma_desc *np, *p;
5130 int entry;
5131 int res;
5132
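/* If the previous NAPI poll stopped in the middle of a multi-descriptor
 * frame, restore the saved error/length state before continuing.
 */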
5133 if (!count && rx_q->state_saved) {
5134 error = rx_q->state.error;
5135 len = rx_q->state.len;
5136 } else {
5137 rx_q->state_saved = false;
5138 error = 0;
5139 len = 0;
5140 }
5141
5142 if (count >= limit)
5143 break;
5144
5145 read_again:
5146 buf1_len = 0;
5147 entry = next_entry;
5148 buf = &rx_q->buf_pool[entry];
5149
5150 if (dirty >= STMMAC_RX_FILL_BATCH) {
5151 failure = failure ||
5152 !stmmac_rx_refill_zc(priv, queue, dirty);
5153 dirty = 0;
5154 }
5155
5156 if (priv->extend_desc)
5157 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5158 else
5159 p = rx_q->dma_rx + entry;
5160
5161 /* read the status of the incoming frame */
5162 status = stmmac_rx_status(priv, &priv->xstats, p);
5163 /* check if managed by the DMA otherwise go ahead */
5164 if (unlikely(status & dma_own))
5165 break;
5166
5167 /* Prefetch the next RX descriptor */
5168 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5169 priv->dma_conf.dma_rx_size);
5170 next_entry = rx_q->cur_rx;
5171
5172 if (priv->extend_desc)
5173 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5174 else
5175 np = rx_q->dma_rx + next_entry;
5176
5177 prefetch(np);
5178
5179 /* Ensure a valid XSK buffer before proceeding */
5180 if (!buf->xdp)
5181 break;
5182
5183 if (priv->extend_desc)
5184 stmmac_rx_extended_status(priv, &priv->xstats,
5185 rx_q->dma_erx + entry);
5186 if (unlikely(status == discard_frame)) {
5187 xsk_buff_free(buf->xdp);
5188 buf->xdp = NULL;
5189 dirty++;
5190 error = 1;
5191 if (!priv->hwts_rx_en)
5192 rx_errors++;
5193 }
5194
5195 if (unlikely(error && (status & rx_not_ls)))
5196 goto read_again;
5197 if (unlikely(error)) {
5198 count++;
5199 continue;
5200 }
5201
5202 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5203 if (likely(status & rx_not_ls)) {
5204 xsk_buff_free(buf->xdp);
5205 buf->xdp = NULL;
5206 dirty++;
5207 count++;
5208 goto read_again;
5209 }
5210
5211 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5212 ctx->priv = priv;
5213 ctx->desc = p;
5214 ctx->ndesc = np;
5215
5216 /* XDP ZC frames only support the primary buffer for now */
5217 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5218 len += buf1_len;
5219
5220 /* ACS is disabled; strip manually. */
5221 if (likely(!(status & rx_not_ls))) {
5222 buf1_len -= ETH_FCS_LEN;
5223 len -= ETH_FCS_LEN;
5224 }
5225
5226 /* RX buffer is good and fits into an XSK pool buffer */
5227 buf->xdp->data_end = buf->xdp->data + buf1_len;
5228 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5229
5230 prog = READ_ONCE(priv->xdp_prog);
5231 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5232
5233 switch (res) {
5234 case STMMAC_XDP_PASS:
5235 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5236 xsk_buff_free(buf->xdp);
5237 break;
5238 case STMMAC_XDP_CONSUMED:
5239 xsk_buff_free(buf->xdp);
5240 rx_dropped++;
5241 break;
5242 case STMMAC_XDP_TX:
5243 case STMMAC_XDP_REDIRECT:
5244 xdp_status |= res;
5245 break;
5246 }
5247
5248 buf->xdp = NULL;
5249 dirty++;
5250 count++;
5251 }
5252
5253 if (status & rx_not_ls) {
5254 rx_q->state_saved = true;
5255 rx_q->state.error = error;
5256 rx_q->state.len = len;
5257 }
5258
5259 stmmac_finalize_xdp_rx(priv, xdp_status);
5260
5261 u64_stats_update_begin(&rxq_stats->napi_syncp);
5262 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5263 u64_stats_update_end(&rxq_stats->napi_syncp);
5264
5265 priv->xstats.rx_dropped += rx_dropped;
5266 priv->xstats.rx_errors += rx_errors;
5267
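/* In need_wakeup mode, report back to user space whether it must wake
 * the driver again, based on whether a refill is still pending.
 */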
5268 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5269 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5270 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5271 else
5272 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5273
5274 return (int)count;
5275 }
5276
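/* Returning the full budget on a refill failure keeps this NAPI
 * context scheduled so the buffer allocation is retried shortly.
 */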
5277 return failure ? limit : (int)count;
5278 }
5279
5280 /**
5281 * stmmac_rx - manage the receive process
5282 * @priv: driver private structure
5283 * @limit: napi budget
5284 * @queue: RX queue index.
5285 * Description: this is the function called by the napi poll method.
5286 * It gets all the frames inside the ring.
5287 */
5288 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5289 {
5290 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5291 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5292 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5293 struct stmmac_channel *ch = &priv->channel[queue];
5294 unsigned int count = 0, error = 0, len = 0;
5295 int status = 0, coe = priv->hw->rx_csum;
5296 unsigned int next_entry = rx_q->cur_rx;
5297 enum dma_data_direction dma_dir;
5298 unsigned int desc_size;
5299 struct sk_buff *skb = NULL;
5300 struct stmmac_xdp_buff ctx;
5301 int xdp_status = 0;
5302 int buf_sz;
5303
5304 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
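/* Round the per-buffer size up to whole pages for xdp_init_buff() and
 * never let the budget consume every descriptor in the ring.
 */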
5305 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5306 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5307
5308 if (netif_msg_rx_status(priv)) {
5309 void *rx_head;
5310
5311 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5312 if (priv->extend_desc) {
5313 rx_head = (void *)rx_q->dma_erx;
5314 desc_size = sizeof(struct dma_extended_desc);
5315 } else {
5316 rx_head = (void *)rx_q->dma_rx;
5317 desc_size = sizeof(struct dma_desc);
5318 }
5319
5320 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5321 rx_q->dma_rx_phy, desc_size);
5322 }
5323 while (count < limit) {
5324 unsigned int buf1_len = 0, buf2_len = 0;
5325 enum pkt_hash_types hash_type;
5326 struct stmmac_rx_buffer *buf;
5327 struct dma_desc *np, *p;
5328 int entry;
5329 u32 hash;
5330
5331 if (!count && rx_q->state_saved) {
5332 skb = rx_q->state.skb;
5333 error = rx_q->state.error;
5334 len = rx_q->state.len;
5335 } else {
5336 rx_q->state_saved = false;
5337 skb = NULL;
5338 error = 0;
5339 len = 0;
5340 }
5341
5342 read_again:
5343 if (count >= limit)
5344 break;
5345
5346 buf1_len = 0;
5347 buf2_len = 0;
5348 entry = next_entry;
5349 buf = &rx_q->buf_pool[entry];
5350
5351 if (priv->extend_desc)
5352 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5353 else
5354 p = rx_q->dma_rx + entry;
5355
5356 /* read the status of the incoming frame */
5357 status = stmmac_rx_status(priv, &priv->xstats, p);
5358 /* check if managed by the DMA otherwise go ahead */
5359 if (unlikely(status & dma_own))
5360 break;
5361
5362 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5363 priv->dma_conf.dma_rx_size);
5364 next_entry = rx_q->cur_rx;
5365
5366 if (priv->extend_desc)
5367 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5368 else
5369 np = rx_q->dma_rx + next_entry;
5370
5371 prefetch(np);
5372
5373 if (priv->extend_desc)
5374 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5375 if (unlikely(status == discard_frame)) {
5376 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5377 buf->page = NULL;
5378 error = 1;
5379 if (!priv->hwts_rx_en)
5380 rx_errors++;
5381 }
5382
5383 if (unlikely(error && (status & rx_not_ls)))
5384 goto read_again;
5385 if (unlikely(error)) {
5386 dev_kfree_skb(skb);
5387 skb = NULL;
5388 count++;
5389 continue;
5390 }
5391
5392 /* Buffer is good. Go on. */
5393
5394 prefetch(page_address(buf->page) + buf->page_offset);
5395 if (buf->sec_page)
5396 prefetch(page_address(buf->sec_page));
5397
5398 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5399 len += buf1_len;
5400 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5401 len += buf2_len;
5402
5403 /* ACS is disabled; strip manually. */
5404 if (likely(!(status & rx_not_ls))) {
5405 if (buf2_len) {
5406 buf2_len -= ETH_FCS_LEN;
5407 len -= ETH_FCS_LEN;
5408 } else if (buf1_len) {
5409 buf1_len -= ETH_FCS_LEN;
5410 len -= ETH_FCS_LEN;
5411 }
5412 }
5413
5414 if (!skb) {
5415 unsigned int pre_len, sync_len;
5416
5417 dma_sync_single_for_cpu(priv->device, buf->addr,
5418 buf1_len, dma_dir);
5419
5420 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5421 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5422 buf->page_offset, buf1_len, true);
5423
5424 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5425 buf->page_offset;
5426
5427 ctx.priv = priv;
5428 ctx.desc = p;
5429 ctx.ndesc = np;
5430
5431 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5432 /* Due to xdp_adjust_tail: the DMA sync for_device must
5433 * cover the max length the CPU touched
5434 */
5435 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5436 buf->page_offset;
5437 sync_len = max(sync_len, pre_len);
5438
5439 /* Handle verdicts other than XDP_PASS */
5440 if (IS_ERR(skb)) {
5441 unsigned int xdp_res = -PTR_ERR(skb);
5442
5443 if (xdp_res & STMMAC_XDP_CONSUMED) {
5444 page_pool_put_page(rx_q->page_pool,
5445 virt_to_head_page(ctx.xdp.data),
5446 sync_len, true);
5447 buf->page = NULL;
5448 rx_dropped++;
5449
5450 /* Clear skb as it was only used to carry
5451 * the status returned by the XDP program.
5452 */
5453 skb = NULL;
5454
5455 if (unlikely((status & rx_not_ls)))
5456 goto read_again;
5457
5458 count++;
5459 continue;
5460 } else if (xdp_res & (STMMAC_XDP_TX |
5461 STMMAC_XDP_REDIRECT)) {
5462 xdp_status |= xdp_res;
5463 buf->page = NULL;
5464 skb = NULL;
5465 count++;
5466 continue;
5467 }
5468 }
5469 }
5470
5471 if (!skb) {
5472 /* XDP program may expand or reduce tail */
5473 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5474
5475 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5476 if (!skb) {
5477 rx_dropped++;
5478 count++;
5479 goto drain_data;
5480 }
5481
5482 /* XDP program may adjust header */
5483 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5484 skb_put(skb, buf1_len);
5485
5486 /* Data payload copied into SKB, page ready for recycle */
5487 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5488 buf->page = NULL;
5489 } else if (buf1_len) {
5490 dma_sync_single_for_cpu(priv->device, buf->addr,
5491 buf1_len, dma_dir);
5492 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5493 buf->page, buf->page_offset, buf1_len,
5494 priv->dma_conf.dma_buf_sz);
5495
5496 /* Data payload appended into SKB */
5497 skb_mark_for_recycle(skb);
5498 buf->page = NULL;
5499 }
5500
5501 if (buf2_len) {
5502 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5503 buf2_len, dma_dir);
5504 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5505 buf->sec_page, 0, buf2_len,
5506 priv->dma_conf.dma_buf_sz);
5507
5508 /* Data payload appended into SKB */
5509 skb_mark_for_recycle(skb);
5510 buf->sec_page = NULL;
5511 }
5512
5513 drain_data:
5514 if (likely(status & rx_not_ls))
5515 goto read_again;
5516 if (!skb)
5517 continue;
5518
5519 /* Got entire packet into SKB. Finish it. */
5520
5521 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5522 stmmac_rx_vlan(priv->dev, skb);
5523 skb->protocol = eth_type_trans(skb, priv->dev);
5524
5525 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5526 skb_checksum_none_assert(skb);
5527 else
5528 skb->ip_summed = CHECKSUM_UNNECESSARY;
5529
5530 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5531 skb_set_hash(skb, hash, hash_type);
5532
5533 skb_record_rx_queue(skb, queue);
5534 napi_gro_receive(&ch->rx_napi, skb);
5535 skb = NULL;
5536
5537 rx_packets++;
5538 rx_bytes += len;
5539 count++;
5540 }
5541
5542 if (status & rx_not_ls || skb) {
5543 rx_q->state_saved = true;
5544 rx_q->state.skb = skb;
5545 rx_q->state.error = error;
5546 rx_q->state.len = len;
5547 }
5548
5549 stmmac_finalize_xdp_rx(priv, xdp_status);
5550
5551 stmmac_rx_refill(priv, queue);
5552
5553 u64_stats_update_begin(&rxq_stats->napi_syncp);
5554 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5555 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5556 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5557 u64_stats_update_end(&rxq_stats->napi_syncp);
5558
5559 priv->xstats.rx_dropped += rx_dropped;
5560 priv->xstats.rx_errors += rx_errors;
5561
5562 return count;
5563 }
5564
5565 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5566 {
5567 struct stmmac_channel *ch =
5568 container_of(napi, struct stmmac_channel, rx_napi);
5569 struct stmmac_priv *priv = ch->priv_data;
5570 struct stmmac_rxq_stats *rxq_stats;
5571 u32 chan = ch->index;
5572 int work_done;
5573
5574 rxq_stats = &priv->xstats.rxq_stats[chan];
5575 u64_stats_update_begin(&rxq_stats->napi_syncp);
5576 u64_stats_inc(&rxq_stats->napi.poll);
5577 u64_stats_update_end(&rxq_stats->napi_syncp);
5578
5579 work_done = stmmac_rx(priv, budget, chan);
5580 if (work_done < budget && napi_complete_done(napi, work_done)) {
5581 unsigned long flags;
5582
5583 spin_lock_irqsave(&ch->lock, flags);
5584 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5585 spin_unlock_irqrestore(&ch->lock, flags);
5586 }
5587
5588 return work_done;
5589 }
5590
5591 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5592 {
5593 struct stmmac_channel *ch =
5594 container_of(napi, struct stmmac_channel, tx_napi);
5595 struct stmmac_priv *priv = ch->priv_data;
5596 struct stmmac_txq_stats *txq_stats;
5597 u32 chan = ch->index;
5598 int work_done;
5599
5600 txq_stats = &priv->xstats.txq_stats[chan];
5601 u64_stats_update_begin(&txq_stats->napi_syncp);
5602 u64_stats_inc(&txq_stats->napi.poll);
5603 u64_stats_update_end(&txq_stats->napi_syncp);
5604
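/* stmmac_tx_clean() may complete more descriptors than the budget;
 * clamp the result so the NAPI contract (work_done <= budget) holds.
 */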
5605 work_done = stmmac_tx_clean(priv, budget, chan);
5606 work_done = min(work_done, budget);
5607
5608 if (work_done < budget && napi_complete_done(napi, work_done)) {
5609 unsigned long flags;
5610
5611 spin_lock_irqsave(&ch->lock, flags);
5612 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5613 spin_unlock_irqrestore(&ch->lock, flags);
5614 }
5615
5616 return work_done;
5617 }
5618
5619 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5620 {
5621 struct stmmac_channel *ch =
5622 container_of(napi, struct stmmac_channel, rxtx_napi);
5623 struct stmmac_priv *priv = ch->priv_data;
5624 int rx_done, tx_done, rxtx_done;
5625 struct stmmac_rxq_stats *rxq_stats;
5626 struct stmmac_txq_stats *txq_stats;
5627 u32 chan = ch->index;
5628
5629 rxq_stats = &priv->xstats.rxq_stats[chan];
5630 u64_stats_update_begin(&rxq_stats->napi_syncp);
5631 u64_stats_inc(&rxq_stats->napi.poll);
5632 u64_stats_update_end(&rxq_stats->napi_syncp);
5633
5634 txq_stats = &priv->xstats.txq_stats[chan];
5635 u64_stats_update_begin(&txq_stats->napi_syncp);
5636 u64_stats_inc(&txq_stats->napi.poll);
5637 u64_stats_update_end(&txq_stats->napi_syncp);
5638
5639 tx_done = stmmac_tx_clean(priv, budget, chan);
5640 tx_done = min(tx_done, budget);
5641
5642 rx_done = stmmac_rx_zc(priv, budget, chan);
5643
5644 rxtx_done = max(tx_done, rx_done);
5645
5646 /* If either TX or RX work is not complete, return budget
5647 * and keep polling
5648 */
5649 if (rxtx_done >= budget)
5650 return budget;
5651
5652 /* all work done, exit the polling mode */
5653 if (napi_complete_done(napi, rxtx_done)) {
5654 unsigned long flags;
5655
5656 spin_lock_irqsave(&ch->lock, flags);
5657 /* Both RX and TX work are complete,
5658 * so enable both RX & TX IRQs.
5659 */
5660 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5661 spin_unlock_irqrestore(&ch->lock, flags);
5662 }
5663
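/* After napi_complete_done() the poll routine must report strictly
 * less than the full budget, hence the budget - 1 cap below.
 */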
5664 return min(rxtx_done, budget - 1);
5665 }
5666
5667 /**
5668 * stmmac_tx_timeout
5669 * @dev : Pointer to net device structure
5670 * @txqueue: the index of the hanging transmit queue
5671 * Description: this function is called when a packet transmission fails to
5672 * complete within a reasonable time. The driver will mark the error in the
5673 * netdev structure and arrange for the device to be reset to a sane state
5674 * in order to transmit a new packet.
5675 */
5676 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5677 {
5678 struct stmmac_priv *priv = netdev_priv(dev);
5679
5680 stmmac_global_err(priv);
5681 }
5682
5683 /**
5684 * stmmac_set_rx_mode - entry point for multicast addressing
5685 * @dev : pointer to the device structure
5686 * Description:
5687 * This function is a driver entry point which gets called by the kernel
5688 * whenever multicast addresses must be enabled/disabled.
5689 * Return value:
5690 * void.
5691 */
5692 static void stmmac_set_rx_mode(struct net_device *dev)
5693 {
5694 struct stmmac_priv *priv = netdev_priv(dev);
5695
5696 stmmac_set_filter(priv, priv->hw, dev);
5697 }
5698
5699 /**
5700 * stmmac_change_mtu - entry point to change MTU size for the device.
5701 * @dev : device pointer.
5702 * @new_mtu : the new MTU size for the device.
5703 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5704 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5705 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5706 * Return value:
5707 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5708 * file on failure.
5709 */
5710 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5711 {
5712 struct stmmac_priv *priv = netdev_priv(dev);
5713 int txfifosz = priv->plat->tx_fifo_size;
5714 struct stmmac_dma_conf *dma_conf;
5715 const int mtu = new_mtu;
5716 int ret;
5717
5718 if (txfifosz == 0)
5719 txfifosz = priv->dma_cap.tx_fifo_size;
5720
5721 txfifosz /= priv->plat->tx_queues_to_use;
5722
5723 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5724 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5725 return -EINVAL;
5726 }
5727
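/* The aligned size below is only used for the FIFO/buffer sanity
 * checks; the interface MTU itself is set to the requested value.
 */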
5728 new_mtu = STMMAC_ALIGN(new_mtu);
5729
5730 /* Reject if the per-queue TX FIFO is too small or the MTU is too large */
5731 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5732 return -EINVAL;
5733
5734 if (netif_running(dev)) {
5735 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5736 /* Try to allocate the new DMA conf with the new mtu */
5737 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5738 if (IS_ERR(dma_conf)) {
5739 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5740 mtu);
5741 return PTR_ERR(dma_conf);
5742 }
5743
5744 stmmac_release(dev);
5745
5746 ret = __stmmac_open(dev, dma_conf);
5747 if (ret) {
5748 free_dma_desc_resources(priv, dma_conf);
5749 kfree(dma_conf);
5750 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5751 return ret;
5752 }
5753
5754 kfree(dma_conf);
5755
5756 stmmac_set_rx_mode(dev);
5757 }
5758
5759 dev->mtu = mtu;
5760 netdev_update_features(dev);
5761
5762 return 0;
5763 }
5764
5765 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5766 netdev_features_t features)
5767 {
5768 struct stmmac_priv *priv = netdev_priv(dev);
5769
5770 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5771 features &= ~NETIF_F_RXCSUM;
5772
5773 if (!priv->plat->tx_coe)
5774 features &= ~NETIF_F_CSUM_MASK;
5775
5776 /* Some GMAC devices have buggy Jumbo frame support that
5777 * requires the TX COE to be disabled for oversized frames
5778 * (due to limited buffer sizes). In this case we disable
5779 * the TX csum insertion in the TDES and do not use SF.
5780 */
5781 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5782 features &= ~NETIF_F_CSUM_MASK;
5783
5784 /* Disable tso if asked by ethtool */
5785 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5786 if (features & NETIF_F_TSO)
5787 priv->tso = true;
5788 else
5789 priv->tso = false;
5790 }
5791
5792 return features;
5793 }
5794
5795 static int stmmac_set_features(struct net_device *netdev,
5796 netdev_features_t features)
5797 {
5798 struct stmmac_priv *priv = netdev_priv(netdev);
5799
5800 /* Keep the COE Type in case csum is supported */
5801 if (features & NETIF_F_RXCSUM)
5802 priv->hw->rx_csum = priv->plat->rx_coe;
5803 else
5804 priv->hw->rx_csum = 0;
5805 /* No check needed because rx_coe has already been set and it will be
5806 * fixed up in case of an issue.
5807 */
5808 stmmac_rx_ipc(priv, priv->hw);
5809
5810 if (priv->sph_cap) {
5811 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5812 u32 chan;
5813
5814 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5815 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5816 }
5817
5818 return 0;
5819 }
5820
5821 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5822 {
5823 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5824 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5825 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5826 bool *hs_enable = &fpe_cfg->hs_enable;
5827
5828 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5829 return;
5830
5831 /* If LP has sent verify mPacket, LP is FPE capable */
5832 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5833 if (*lp_state < FPE_STATE_CAPABLE)
5834 *lp_state = FPE_STATE_CAPABLE;
5835
5836 /* If the user has requested FPE enable, respond quickly */
5837 if (*hs_enable)
5838 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5839 fpe_cfg,
5840 MPACKET_RESPONSE);
5841 }
5842
5843 /* If Local has sent verify mPacket, Local is FPE capable */
5844 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5845 if (*lo_state < FPE_STATE_CAPABLE)
5846 *lo_state = FPE_STATE_CAPABLE;
5847 }
5848
5849 /* If LP has sent response mPacket, LP is entering FPE ON */
5850 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5851 *lp_state = FPE_STATE_ENTERING_ON;
5852
5853 /* If Local has sent response mPacket, Local is entering FPE ON */
5854 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5855 *lo_state = FPE_STATE_ENTERING_ON;
5856
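/* Kick the FPE workqueue to continue the verification handshake,
 * unless teardown is in progress or the work is already queued.
 */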
5857 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5858 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5859 priv->fpe_wq) {
5860 queue_work(priv->fpe_wq, &priv->fpe_task);
5861 }
5862 }
5863
5864 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5865 {
5866 u32 rx_cnt = priv->plat->rx_queues_to_use;
5867 u32 tx_cnt = priv->plat->tx_queues_to_use;
5868 u32 queues_count;
5869 u32 queue;
5870 bool xmac;
5871
5872 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5873 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5874
5875 if (priv->irq_wake)
5876 pm_wakeup_event(priv->device, 0);
5877
5878 if (priv->dma_cap.estsel)
5879 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5880 &priv->xstats, tx_cnt);
5881
5882 if (priv->dma_cap.fpesel) {
5883 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5884 priv->dev);
5885
5886 stmmac_fpe_event_status(priv, status);
5887 }
5888
5889 /* To handle GMAC own interrupts */
5890 if ((priv->plat->has_gmac) || xmac) {
5891 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5892
5893 if (unlikely(status)) {
5894 /* For LPI we need to save the tx status */
5895 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5896 priv->tx_path_in_lpi_mode = true;
5897 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5898 priv->tx_path_in_lpi_mode = false;
5899 }
5900
5901 for (queue = 0; queue < queues_count; queue++) {
5902 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5903 queue);
5904 }
5905
5906 /* PCS link status */
5907 if (priv->hw->pcs &&
5908 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5909 if (priv->xstats.pcs_link)
5910 netif_carrier_on(priv->dev);
5911 else
5912 netif_carrier_off(priv->dev);
5913 }
5914
5915 stmmac_timestamp_interrupt(priv, priv);
5916 }
5917 }
5918
5919 /**
5920 * stmmac_interrupt - main ISR
5921 * @irq: interrupt number.
5922 * @dev_id: to pass the net device pointer.
5923 * Description: this is the main driver interrupt service routine.
5924 * It can call:
5925 * o DMA service routine (to manage incoming frame reception and transmission
5926 * status)
5927 * o Core interrupts to manage: remote wake-up, management counter, LPI
5928 * interrupts.
5929 */
5930 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5931 {
5932 struct net_device *dev = (struct net_device *)dev_id;
5933 struct stmmac_priv *priv = netdev_priv(dev);
5934
5935 /* Check if adapter is up */
5936 if (test_bit(STMMAC_DOWN, &priv->state))
5937 return IRQ_HANDLED;
5938
5939 /* Check if a fatal error happened */
5940 if (stmmac_safety_feat_interrupt(priv))
5941 return IRQ_HANDLED;
5942
5943 /* To handle Common interrupts */
5944 stmmac_common_interrupt(priv);
5945
5946 /* To handle DMA interrupts */
5947 stmmac_dma_interrupt(priv);
5948
5949 return IRQ_HANDLED;
5950 }
5951
5952 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5953 {
5954 struct net_device *dev = (struct net_device *)dev_id;
5955 struct stmmac_priv *priv = netdev_priv(dev);
5956
5957 /* Check if adapter is up */
5958 if (test_bit(STMMAC_DOWN, &priv->state))
5959 return IRQ_HANDLED;
5960
5961 /* To handle Common interrupts */
5962 stmmac_common_interrupt(priv);
5963
5964 return IRQ_HANDLED;
5965 }
5966
5967 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5968 {
5969 struct net_device *dev = (struct net_device *)dev_id;
5970 struct stmmac_priv *priv = netdev_priv(dev);
5971
5972 /* Check if adapter is up */
5973 if (test_bit(STMMAC_DOWN, &priv->state))
5974 return IRQ_HANDLED;
5975
5976 /* Check if a fatal error happened */
5977 stmmac_safety_feat_interrupt(priv);
5978
5979 return IRQ_HANDLED;
5980 }
5981
5982 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5983 {
5984 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5985 struct stmmac_dma_conf *dma_conf;
5986 int chan = tx_q->queue_index;
5987 struct stmmac_priv *priv;
5988 int status;
5989
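/* tx_q is embedded in dma_conf, which itself is embedded in
 * stmmac_priv, so two container_of() steps recover the private data.
 */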
5990 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5991 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5992
5993 /* Check if adapter is up */
5994 if (test_bit(STMMAC_DOWN, &priv->state))
5995 return IRQ_HANDLED;
5996
5997 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5998
5999 if (unlikely(status & tx_hard_error_bump_tc)) {
6000 /* Try to bump up the dma threshold on this failure */
6001 stmmac_bump_dma_threshold(priv, chan);
6002 } else if (unlikely(status == tx_hard_error)) {
6003 stmmac_tx_err(priv, chan);
6004 }
6005
6006 return IRQ_HANDLED;
6007 }
6008
6009 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6010 {
6011 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6012 struct stmmac_dma_conf *dma_conf;
6013 int chan = rx_q->queue_index;
6014 struct stmmac_priv *priv;
6015
6016 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6017 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6018
6019 /* Check if adapter is up */
6020 if (test_bit(STMMAC_DOWN, &priv->state))
6021 return IRQ_HANDLED;
6022
6023 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6024
6025 return IRQ_HANDLED;
6026 }
6027
6028 /**
6029 * stmmac_ioctl - Entry point for the Ioctl
6030 * @dev: Device pointer.
6031 * @rq: An IOCTL-specific structure that can contain a pointer to
6032 * a proprietary structure used to pass information to the driver.
6033 * @cmd: IOCTL command
6034 * Description:
6035 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6036 */
6037 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6038 {
6039 struct stmmac_priv *priv = netdev_priv(dev);
6040 int ret = -EOPNOTSUPP;
6041
6042 if (!netif_running(dev))
6043 return -EINVAL;
6044
6045 switch (cmd) {
6046 case SIOCGMIIPHY:
6047 case SIOCGMIIREG:
6048 case SIOCSMIIREG:
6049 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6050 break;
6051 case SIOCSHWTSTAMP:
6052 ret = stmmac_hwtstamp_set(dev, rq);
6053 break;
6054 case SIOCGHWTSTAMP:
6055 ret = stmmac_hwtstamp_get(dev, rq);
6056 break;
6057 default:
6058 break;
6059 }
6060
6061 return ret;
6062 }
6063
6064 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6065 void *cb_priv)
6066 {
6067 struct stmmac_priv *priv = cb_priv;
6068 int ret = -EOPNOTSUPP;
6069
6070 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6071 return ret;
6072
6073 __stmmac_disable_all_queues(priv);
6074
6075 switch (type) {
6076 case TC_SETUP_CLSU32:
6077 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6078 break;
6079 case TC_SETUP_CLSFLOWER:
6080 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6081 break;
6082 default:
6083 break;
6084 }
6085
6086 stmmac_enable_all_queues(priv);
6087 return ret;
6088 }
6089
6090 static LIST_HEAD(stmmac_block_cb_list);
6091
6092 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6093 void *type_data)
6094 {
6095 struct stmmac_priv *priv = netdev_priv(ndev);
6096
6097 switch (type) {
6098 case TC_QUERY_CAPS:
6099 return stmmac_tc_query_caps(priv, priv, type_data);
6100 case TC_SETUP_BLOCK:
6101 return flow_block_cb_setup_simple(type_data,
6102 &stmmac_block_cb_list,
6103 stmmac_setup_tc_block_cb,
6104 priv, priv, true);
6105 case TC_SETUP_QDISC_CBS:
6106 return stmmac_tc_setup_cbs(priv, priv, type_data);
6107 case TC_SETUP_QDISC_TAPRIO:
6108 return stmmac_tc_setup_taprio(priv, priv, type_data);
6109 case TC_SETUP_QDISC_ETF:
6110 return stmmac_tc_setup_etf(priv, priv, type_data);
6111 default:
6112 return -EOPNOTSUPP;
6113 }
6114 }
6115
6116 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6117 struct net_device *sb_dev)
6118 {
6119 int gso = skb_shinfo(skb)->gso_type;
6120
6121 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6122 /*
6123 * There is no way to determine the number of TSO/USO
6124 * capable Queues. Let's always use Queue 0
6125 * because if TSO/USO is supported then at least this
6126 * one will be capable.
6127 */
6128 return 0;
6129 }
6130
6131 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6132 }
6133
6134 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6135 {
6136 struct stmmac_priv *priv = netdev_priv(ndev);
6137 int ret = 0;
6138
6139 ret = pm_runtime_resume_and_get(priv->device);
6140 if (ret < 0)
6141 return ret;
6142
6143 ret = eth_mac_addr(ndev, addr);
6144 if (ret)
6145 goto set_mac_error;
6146
6147 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6148
6149 set_mac_error:
6150 pm_runtime_put(priv->device);
6151
6152 return ret;
6153 }
6154
6155 #ifdef CONFIG_DEBUG_FS
6156 static struct dentry *stmmac_fs_dir;
6157
6158 static void sysfs_display_ring(void *head, int size, int extend_desc,
6159 struct seq_file *seq, dma_addr_t dma_phy_addr)
6160 {
6161 int i;
6162 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6163 struct dma_desc *p = (struct dma_desc *)head;
6164 dma_addr_t dma_addr;
6165
6166 for (i = 0; i < size; i++) {
6167 if (extend_desc) {
6168 dma_addr = dma_phy_addr + i * sizeof(*ep);
6169 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6170 i, &dma_addr,
6171 le32_to_cpu(ep->basic.des0),
6172 le32_to_cpu(ep->basic.des1),
6173 le32_to_cpu(ep->basic.des2),
6174 le32_to_cpu(ep->basic.des3));
6175 ep++;
6176 } else {
6177 dma_addr = dma_phy_addr + i * sizeof(*p);
6178 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6179 i, &dma_addr,
6180 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6181 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6182 p++;
6183 }
6184 seq_printf(seq, "\n");
6185 }
6186 }
6187
6188 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6189 {
6190 struct net_device *dev = seq->private;
6191 struct stmmac_priv *priv = netdev_priv(dev);
6192 u32 rx_count = priv->plat->rx_queues_to_use;
6193 u32 tx_count = priv->plat->tx_queues_to_use;
6194 u32 queue;
6195
6196 if ((dev->flags & IFF_UP) == 0)
6197 return 0;
6198
6199 for (queue = 0; queue < rx_count; queue++) {
6200 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6201
6202 seq_printf(seq, "RX Queue %d:\n", queue);
6203
6204 if (priv->extend_desc) {
6205 seq_printf(seq, "Extended descriptor ring:\n");
6206 sysfs_display_ring((void *)rx_q->dma_erx,
6207 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6208 } else {
6209 seq_printf(seq, "Descriptor ring:\n");
6210 sysfs_display_ring((void *)rx_q->dma_rx,
6211 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6212 }
6213 }
6214
6215 for (queue = 0; queue < tx_count; queue++) {
6216 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6217
6218 seq_printf(seq, "TX Queue %d:\n", queue);
6219
6220 if (priv->extend_desc) {
6221 seq_printf(seq, "Extended descriptor ring:\n");
6222 sysfs_display_ring((void *)tx_q->dma_etx,
6223 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6224 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6225 seq_printf(seq, "Descriptor ring:\n");
6226 sysfs_display_ring((void *)tx_q->dma_tx,
6227 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6228 }
6229 }
6230
6231 return 0;
6232 }
6233 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6234
6235 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6236 {
6237 static const char * const dwxgmac_timestamp_source[] = {
6238 "None",
6239 "Internal",
6240 "External",
6241 "Both",
6242 };
6243 static const char * const dwxgmac_safety_feature_desc[] = {
6244 "No",
6245 "All Safety Features with ECC and Parity",
6246 "All Safety Features without ECC or Parity",
6247 "All Safety Features with Parity Only",
6248 "ECC Only",
6249 "UNDEFINED",
6250 "UNDEFINED",
6251 "UNDEFINED",
6252 };
6253 struct net_device *dev = seq->private;
6254 struct stmmac_priv *priv = netdev_priv(dev);
6255
6256 if (!priv->hw_cap_support) {
6257 seq_printf(seq, "DMA HW features not supported\n");
6258 return 0;
6259 }
6260
6261 seq_printf(seq, "==============================\n");
6262 seq_printf(seq, "\tDMA HW features\n");
6263 seq_printf(seq, "==============================\n");
6264
6265 seq_printf(seq, "\t10/100 Mbps: %s\n",
6266 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6267 seq_printf(seq, "\t1000 Mbps: %s\n",
6268 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6269 seq_printf(seq, "\tHalf duplex: %s\n",
6270 (priv->dma_cap.half_duplex) ? "Y" : "N");
6271 if (priv->plat->has_xgmac) {
6272 seq_printf(seq,
6273 "\tNumber of Additional MAC address registers: %d\n",
6274 priv->dma_cap.multi_addr);
6275 } else {
6276 seq_printf(seq, "\tHash Filter: %s\n",
6277 (priv->dma_cap.hash_filter) ? "Y" : "N");
6278 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6279 (priv->dma_cap.multi_addr) ? "Y" : "N");
6280 }
6281 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6282 (priv->dma_cap.pcs) ? "Y" : "N");
6283 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6284 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6285 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6286 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6287 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6288 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6289 seq_printf(seq, "\tRMON module: %s\n",
6290 (priv->dma_cap.rmon) ? "Y" : "N");
6291 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6292 (priv->dma_cap.time_stamp) ? "Y" : "N");
6293 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6294 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6295 if (priv->plat->has_xgmac)
6296 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6297 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6298 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6299 (priv->dma_cap.eee) ? "Y" : "N");
6300 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6301 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6302 (priv->dma_cap.tx_coe) ? "Y" : "N");
6303 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6304 priv->plat->has_xgmac) {
6305 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6306 (priv->dma_cap.rx_coe) ? "Y" : "N");
6307 } else {
6308 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6309 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6310 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6311 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6312 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6313 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6314 }
6315 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6316 priv->dma_cap.number_rx_channel);
6317 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6318 priv->dma_cap.number_tx_channel);
6319 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6320 priv->dma_cap.number_rx_queues);
6321 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6322 priv->dma_cap.number_tx_queues);
6323 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6324 (priv->dma_cap.enh_desc) ? "Y" : "N");
6325 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6326 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6327 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6328 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6329 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6330 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6331 priv->dma_cap.pps_out_num);
6332 seq_printf(seq, "\tSafety Features: %s\n",
6333 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6334 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6335 priv->dma_cap.frpsel ? "Y" : "N");
6336 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6337 priv->dma_cap.host_dma_width);
6338 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6339 priv->dma_cap.rssen ? "Y" : "N");
6340 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6341 priv->dma_cap.vlhash ? "Y" : "N");
6342 seq_printf(seq, "\tSplit Header: %s\n",
6343 priv->dma_cap.sphen ? "Y" : "N");
6344 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6345 priv->dma_cap.vlins ? "Y" : "N");
6346 seq_printf(seq, "\tDouble VLAN: %s\n",
6347 priv->dma_cap.dvlan ? "Y" : "N");
6348 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6349 priv->dma_cap.l3l4fnum);
6350 seq_printf(seq, "\tARP Offloading: %s\n",
6351 priv->dma_cap.arpoffsel ? "Y" : "N");
6352 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6353 priv->dma_cap.estsel ? "Y" : "N");
6354 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6355 priv->dma_cap.fpesel ? "Y" : "N");
6356 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6357 priv->dma_cap.tbssel ? "Y" : "N");
6358 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6359 priv->dma_cap.tbs_ch_num);
6360 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6361 priv->dma_cap.sgfsel ? "Y" : "N");
6362 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6363 BIT(priv->dma_cap.ttsfd) >> 1);
6364 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6365 priv->dma_cap.numtc);
6366 seq_printf(seq, "\tDCB Feature: %s\n",
6367 priv->dma_cap.dcben ? "Y" : "N");
6368 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6369 priv->dma_cap.advthword ? "Y" : "N");
6370 seq_printf(seq, "\tPTP Offload: %s\n",
6371 priv->dma_cap.ptoen ? "Y" : "N");
6372 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6373 priv->dma_cap.osten ? "Y" : "N");
6374 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6375 priv->dma_cap.pfcen ? "Y" : "N");
6376 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6377 BIT(priv->dma_cap.frpes) << 6);
6378 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6379 BIT(priv->dma_cap.frpbs) << 6);
6380 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6381 priv->dma_cap.frppipe_num);
6382 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6383 priv->dma_cap.nrvf_num ?
6384 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6385 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6386 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6387 seq_printf(seq, "\tDepth of GCL: %lu\n",
6388 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6389 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6390 priv->dma_cap.cbtisel ? "Y" : "N");
6391 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6392 priv->dma_cap.aux_snapshot_n);
6393 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6394 priv->dma_cap.pou_ost_en ? "Y" : "N");
6395 seq_printf(seq, "\tEnhanced DMA: %s\n",
6396 priv->dma_cap.edma ? "Y" : "N");
6397 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6398 priv->dma_cap.ediffc ? "Y" : "N");
6399 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6400 priv->dma_cap.vxn ? "Y" : "N");
6401 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6402 priv->dma_cap.dbgmem ? "Y" : "N");
6403 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6404 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6405 return 0;
6406 }
6407 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6408
6409 /* Use network device events to rename debugfs file entries.
6410 */
6411 static int stmmac_device_event(struct notifier_block *unused,
6412 unsigned long event, void *ptr)
6413 {
6414 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6415 struct stmmac_priv *priv = netdev_priv(dev);
6416
6417 if (dev->netdev_ops != &stmmac_netdev_ops)
6418 goto done;
6419
6420 switch (event) {
6421 case NETDEV_CHANGENAME:
6422 if (priv->dbgfs_dir)
6423 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6424 priv->dbgfs_dir,
6425 stmmac_fs_dir,
6426 dev->name);
6427 break;
6428 }
6429 done:
6430 return NOTIFY_DONE;
6431 }
6432
6433 static struct notifier_block stmmac_notifier = {
6434 .notifier_call = stmmac_device_event,
6435 };
6436
6437 static void stmmac_init_fs(struct net_device *dev)
6438 {
6439 struct stmmac_priv *priv = netdev_priv(dev);
6440
6441 rtnl_lock();
6442
6443 /* Create per netdev entries */
6444 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6445
6446 /* Entry to report DMA RX/TX rings */
6447 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6448 &stmmac_rings_status_fops);
6449
6450 /* Entry to report the DMA HW features */
6451 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6452 &stmmac_dma_cap_fops);
6453
6454 rtnl_unlock();
6455 }
6456
6457 static void stmmac_exit_fs(struct net_device *dev)
6458 {
6459 struct stmmac_priv *priv = netdev_priv(dev);
6460
6461 debugfs_remove_recursive(priv->dbgfs_dir);
6462 }
6463 #endif /* CONFIG_DEBUG_FS */
6464
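/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the low
 * 12 bits (VLAN_VID_MASK) of the little-endian VLAN ID.
 */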
6465 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6466 {
6467 unsigned char *data = (unsigned char *)&vid_le;
6468 unsigned char data_byte = 0;
6469 u32 crc = ~0x0;
6470 u32 temp = 0;
6471 int i, bits;
6472
6473 bits = get_bitmask_order(VLAN_VID_MASK);
6474 for (i = 0; i < bits; i++) {
6475 if ((i % 8) == 0)
6476 data_byte = data[i / 8];
6477
6478 temp = ((crc & 1) ^ data_byte) & 1;
6479 crc >>= 1;
6480 data_byte >>= 1;
6481
6482 if (temp)
6483 crc ^= 0xedb88320;
6484 }
6485
6486 return crc;
6487 }
6488
6489 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6490 {
6491 u32 crc, hash = 0;
6492 u16 pmatch = 0;
6493 int count = 0;
6494 u16 vid = 0;
6495
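/* Each active VID sets one bit of a 16-bin hash: the bin index is the
 * top nibble of the bit-reversed, inverted CRC-32 of the VID.
 */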
6496 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6497 __le16 vid_le = cpu_to_le16(vid);
6498 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6499 hash |= (1 << crc);
6500 count++;
6501 }
6502
6503 if (!priv->dma_cap.vlhash) {
6504 if (count > 2) /* VID = 0 always passes filter */
6505 return -EOPNOTSUPP;
6506
6507 pmatch = vid;
6508 hash = 0;
6509 }
6510
6511 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6512 }
6513
6514 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6515 {
6516 struct stmmac_priv *priv = netdev_priv(ndev);
6517 bool is_double = false;
6518 int ret;
6519
6520 ret = pm_runtime_resume_and_get(priv->device);
6521 if (ret < 0)
6522 return ret;
6523
6524 if (be16_to_cpu(proto) == ETH_P_8021AD)
6525 is_double = true;
6526
6527 set_bit(vid, priv->active_vlans);
6528 ret = stmmac_vlan_update(priv, is_double);
6529 if (ret) {
6530 clear_bit(vid, priv->active_vlans);
6531 goto err_pm_put;
6532 }
6533
6534 if (priv->hw->num_vlan) {
6535 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6536 if (ret)
6537 goto err_pm_put;
6538 }
6539 err_pm_put:
6540 pm_runtime_put(priv->device);
6541
6542 return ret;
6543 }
6544
6545 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6546 {
6547 struct stmmac_priv *priv = netdev_priv(ndev);
6548 bool is_double = false;
6549 int ret;
6550
6551 ret = pm_runtime_resume_and_get(priv->device);
6552 if (ret < 0)
6553 return ret;
6554
6555 if (be16_to_cpu(proto) == ETH_P_8021AD)
6556 is_double = true;
6557
6558 clear_bit(vid, priv->active_vlans);
6559
6560 if (priv->hw->num_vlan) {
6561 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6562 if (ret)
6563 goto del_vlan_error;
6564 }
6565
6566 ret = stmmac_vlan_update(priv, is_double);
6567
6568 del_vlan_error:
6569 pm_runtime_put(priv->device);
6570
6571 return ret;
6572 }
6573
6574 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6575 {
6576 struct stmmac_priv *priv = netdev_priv(dev);
6577
6578 switch (bpf->command) {
6579 case XDP_SETUP_PROG:
6580 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6581 case XDP_SETUP_XSK_POOL:
6582 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6583 bpf->xsk.queue_id);
6584 default:
6585 return -EOPNOTSUPP;
6586 }
6587 }
6588
6589 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6590 struct xdp_frame **frames, u32 flags)
6591 {
6592 struct stmmac_priv *priv = netdev_priv(dev);
6593 int cpu = smp_processor_id();
6594 struct netdev_queue *nq;
6595 int i, nxmit = 0;
6596 int queue;
6597
6598 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6599 return -ENETDOWN;
6600
6601 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6602 return -EINVAL;
6603
6604 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6605 nq = netdev_get_tx_queue(priv->dev, queue);
6606
6607 __netif_tx_lock(nq, cpu);
6608 /* Avoids TX time-out as we are sharing with slow path */
6609 txq_trans_cond_update(nq);
6610
6611 for (i = 0; i < num_frames; i++) {
6612 int res;
6613
6614 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6615 if (res == STMMAC_XDP_CONSUMED)
6616 break;
6617
6618 nxmit++;
6619 }
6620
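/* Only ring the doorbell and re-arm the TX coalescing timer when the
 * caller asked for a flush; frames stay batched on the ring otherwise.
 */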
6621 if (flags & XDP_XMIT_FLUSH) {
6622 stmmac_flush_tx_descriptors(priv, queue);
6623 stmmac_tx_timer_arm(priv, queue);
6624 }
6625
6626 __netif_tx_unlock(nq);
6627
6628 return nxmit;
6629 }
6630
6631 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6632 {
6633 struct stmmac_channel *ch = &priv->channel[queue];
6634 unsigned long flags;
6635
6636 spin_lock_irqsave(&ch->lock, flags);
6637 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6638 spin_unlock_irqrestore(&ch->lock, flags);
6639
6640 stmmac_stop_rx_dma(priv, queue);
6641 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6642 }
6643
6644 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6645 {
6646 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6647 struct stmmac_channel *ch = &priv->channel[queue];
6648 unsigned long flags;
6649 u32 buf_size;
6650 int ret;
6651
6652 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6653 if (ret) {
6654 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6655 return;
6656 }
6657
6658 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6659 if (ret) {
6660 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6661 netdev_err(priv->dev, "Failed to init RX desc.\n");
6662 return;
6663 }
6664
6665 stmmac_reset_rx_queue(priv, queue);
6666 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6667
6668 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6669 rx_q->dma_rx_phy, rx_q->queue_index);
6670
6671 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6672 sizeof(struct dma_desc));
6673 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6674 rx_q->rx_tail_addr, rx_q->queue_index);
6675
6676 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6677 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6678 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6679 buf_size,
6680 rx_q->queue_index);
6681 } else {
6682 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6683 priv->dma_conf.dma_buf_sz,
6684 rx_q->queue_index);
6685 }
6686
6687 stmmac_start_rx_dma(priv, queue);
6688
6689 spin_lock_irqsave(&ch->lock, flags);
6690 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6691 spin_unlock_irqrestore(&ch->lock, flags);
6692 }
6693
6694 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6695 {
6696 struct stmmac_channel *ch = &priv->channel[queue];
6697 unsigned long flags;
6698
6699 spin_lock_irqsave(&ch->lock, flags);
6700 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6701 spin_unlock_irqrestore(&ch->lock, flags);
6702
6703 stmmac_stop_tx_dma(priv, queue);
6704 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6705 }
6706
6707 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6708 {
6709 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6710 struct stmmac_channel *ch = &priv->channel[queue];
6711 unsigned long flags;
6712 int ret;
6713
6714 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6715 if (ret) {
6716 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6717 return;
6718 }
6719
6720 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6721 if (ret) {
6722 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6723 netdev_err(priv->dev, "Failed to init TX desc.\n");
6724 return;
6725 }
6726
6727 stmmac_reset_tx_queue(priv, queue);
6728 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6729
6730 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6731 tx_q->dma_tx_phy, tx_q->queue_index);
6732
6733 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6734 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6735
6736 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6737 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6738 tx_q->tx_tail_addr, tx_q->queue_index);
6739
6740 stmmac_start_tx_dma(priv, queue);
6741
6742 spin_lock_irqsave(&ch->lock, flags);
6743 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6744 spin_unlock_irqrestore(&ch->lock, flags);
6745 }
6746
6747 void stmmac_xdp_release(struct net_device *dev)
6748 {
6749 struct stmmac_priv *priv = netdev_priv(dev);
6750 u32 chan;
6751
6752 /* Ensure tx function is not running */
6753 netif_tx_disable(dev);
6754
6755 /* Disable NAPI process */
6756 stmmac_disable_all_queues(priv);
6757
6758 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6759 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6760
6761 /* Free the IRQ lines */
6762 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6763
6764 /* Stop TX/RX DMA channels */
6765 stmmac_stop_all_dma(priv);
6766
6767 /* Release and free the Rx/Tx resources */
6768 free_dma_desc_resources(priv, &priv->dma_conf);
6769
6770 /* Disable the MAC Rx/Tx */
6771 stmmac_mac_set(priv, priv->ioaddr, false);
6772
6773 /* set trans_start so we don't get spurious
6774 * watchdogs during reset
6775 */
6776 netif_trans_update(dev);
6777 netif_carrier_off(dev);
6778 }
6779
6780 int stmmac_xdp_open(struct net_device *dev)
6781 {
6782 struct stmmac_priv *priv = netdev_priv(dev);
6783 u32 rx_cnt = priv->plat->rx_queues_to_use;
6784 u32 tx_cnt = priv->plat->tx_queues_to_use;
6785 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6786 struct stmmac_rx_queue *rx_q;
6787 struct stmmac_tx_queue *tx_q;
6788 u32 buf_size;
6789 bool sph_en;
6790 u32 chan;
6791 int ret;
6792
6793 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6794 if (ret < 0) {
6795 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6796 __func__);
6797 goto dma_desc_error;
6798 }
6799
6800 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6801 if (ret < 0) {
6802 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6803 __func__);
6804 goto init_error;
6805 }
6806
6807 stmmac_reset_queues_param(priv);
6808
6809 /* DMA CSR Channel configuration */
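/* DMA IRQs stay masked here; they are re-enabled by
 * stmmac_enable_all_dma_irq() once NAPI is running again below.
 */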
6810 for (chan = 0; chan < dma_csr_ch; chan++) {
6811 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6812 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6813 }
6814
6815 /* Adjust Split header */
6816 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6817
6818 /* DMA RX Channel Configuration */
6819 for (chan = 0; chan < rx_cnt; chan++) {
6820 rx_q = &priv->dma_conf.rx_queue[chan];
6821
6822 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6823 rx_q->dma_rx_phy, chan);
6824
6825 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6826 (rx_q->buf_alloc_num *
6827 sizeof(struct dma_desc));
6828 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6829 rx_q->rx_tail_addr, chan);
6830
6831 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6832 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6833 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6834 buf_size,
6835 rx_q->queue_index);
6836 } else {
6837 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6838 priv->dma_conf.dma_buf_sz,
6839 rx_q->queue_index);
6840 }
6841
6842 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6843 }
6844
6845 /* DMA TX Channel Configuration */
6846 for (chan = 0; chan < tx_cnt; chan++) {
6847 tx_q = &priv->dma_conf.tx_queue[chan];
6848
6849 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6850 tx_q->dma_tx_phy, chan);
6851
6852 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6853 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6854 tx_q->tx_tail_addr, chan);
6855
6856 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6857 tx_q->txtimer.function = stmmac_tx_timer;
6858 }
6859
6860 /* Enable the MAC Rx/Tx */
6861 stmmac_mac_set(priv, priv->ioaddr, true);
6862
6863 /* Start Rx & Tx DMA Channels */
6864 stmmac_start_all_dma(priv);
6865
6866 ret = stmmac_request_irq(dev);
6867 if (ret)
6868 goto irq_error;
6869
6870 /* Enable NAPI process */
6871 stmmac_enable_all_queues(priv);
6872 netif_carrier_on(dev);
6873 netif_tx_start_all_queues(dev);
6874 stmmac_enable_all_dma_irq(priv);
6875
6876 return 0;
6877
6878 irq_error:
6879 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6880 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6881
6882 stmmac_hw_teardown(dev);
6883 init_error:
6884 free_dma_desc_resources(priv, &priv->dma_conf);
6885 dma_desc_error:
6886 return ret;
6887 }
6888
6889 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6890 {
6891 struct stmmac_priv *priv = netdev_priv(dev);
6892 struct stmmac_rx_queue *rx_q;
6893 struct stmmac_tx_queue *tx_q;
6894 struct stmmac_channel *ch;
6895
6896 if (test_bit(STMMAC_DOWN, &priv->state) ||
6897 !netif_carrier_ok(priv->dev))
6898 return -ENETDOWN;
6899
6900 if (!stmmac_xdp_is_enabled(priv))
6901 return -EINVAL;
6902
6903 if (queue >= priv->plat->rx_queues_to_use ||
6904 queue >= priv->plat->tx_queues_to_use)
6905 return -EINVAL;
6906
6907 rx_q = &priv->dma_conf.rx_queue[queue];
6908 tx_q = &priv->dma_conf.tx_queue[queue];
6909 ch = &priv->channel[queue];
6910
6911 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6912 return -EINVAL;
6913
6914 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6915 /* EQoS does not have per-DMA channel SW interrupt,
6916 * so we schedule RX Napi straight-away.
6917 */
6918 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6919 __napi_schedule(&ch->rxtx_napi);
6920 }
6921
6922 return 0;
6923 }
6924
6925 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6926 {
6927 struct stmmac_priv *priv = netdev_priv(dev);
6928 u32 tx_cnt = priv->plat->tx_queues_to_use;
6929 u32 rx_cnt = priv->plat->rx_queues_to_use;
6930 unsigned int start;
6931 int q;
6932
6933 for (q = 0; q < tx_cnt; q++) {
6934 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6935 u64 tx_packets;
6936 u64 tx_bytes;
6937
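/* Snapshot the 64-bit counters under the u64_stats seqcount, retrying
 * if a writer raced with us (needed for atomicity on 32-bit hosts).
 */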
6938 do {
6939 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6940 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
6941 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6942 do {
6943 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6944 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6945 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6946
6947 stats->tx_packets += tx_packets;
6948 stats->tx_bytes += tx_bytes;
6949 }
6950
6951 for (q = 0; q < rx_cnt; q++) {
6952 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6953 u64 rx_packets;
6954 u64 rx_bytes;
6955
6956 do {
6957 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6958 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6959 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
6960 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6961
6962 stats->rx_packets += rx_packets;
6963 stats->rx_bytes += rx_bytes;
6964 }
6965
6966 stats->rx_dropped = priv->xstats.rx_dropped;
6967 stats->rx_errors = priv->xstats.rx_errors;
6968 stats->tx_dropped = priv->xstats.tx_dropped;
6969 stats->tx_errors = priv->xstats.tx_errors;
6970 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6971 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6972 stats->rx_length_errors = priv->xstats.rx_length;
6973 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6974 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6975 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6976 }
6977
6978 static const struct net_device_ops stmmac_netdev_ops = {
6979 .ndo_open = stmmac_open,
6980 .ndo_start_xmit = stmmac_xmit,
6981 .ndo_stop = stmmac_release,
6982 .ndo_change_mtu = stmmac_change_mtu,
6983 .ndo_fix_features = stmmac_fix_features,
6984 .ndo_set_features = stmmac_set_features,
6985 .ndo_set_rx_mode = stmmac_set_rx_mode,
6986 .ndo_tx_timeout = stmmac_tx_timeout,
6987 .ndo_eth_ioctl = stmmac_ioctl,
6988 .ndo_get_stats64 = stmmac_get_stats64,
6989 .ndo_setup_tc = stmmac_setup_tc,
6990 .ndo_select_queue = stmmac_select_queue,
6991 .ndo_set_mac_address = stmmac_set_mac_address,
6992 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6993 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6994 .ndo_bpf = stmmac_bpf,
6995 .ndo_xdp_xmit = stmmac_xdp_xmit,
6996 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6997 };
6998
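/**
 * stmmac_reset_subtask - reset the adapter if requested
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * down, close and re-open the device under the rtnl lock to recover from
 * a fatal error.
 */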
6999 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7000 {
7001 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7002 return;
7003 if (test_bit(STMMAC_DOWN, &priv->state))
7004 return;
7005
7006 netdev_err(priv->dev, "Reset adapter.\n");
7007
7008 rtnl_lock();
7009 netif_trans_update(priv->dev);
7010 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7011 usleep_range(1000, 2000);
7012
7013 set_bit(STMMAC_DOWN, &priv->state);
7014 dev_close(priv->dev);
7015 dev_open(priv->dev, NULL);
7016 clear_bit(STMMAC_DOWN, &priv->state);
7017 clear_bit(STMMAC_RESETING, &priv->state);
7018 rtnl_unlock();
7019 }
7020
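/**
 * stmmac_service_task - deferred service work
 * @work: work_struct embedded in the driver private structure
 * Description: runs the reset subtask and clears the service scheduling bit.
 */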
7021 static void stmmac_service_task(struct work_struct *work)
7022 {
7023 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7024 service_task);
7025
7026 stmmac_reset_subtask(priv);
7027 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7028 }
7029
7030 /**
7031 * stmmac_hw_init - Init the MAC device
7032 * @priv: driver private structure
7033 * Description: this function is to configure the MAC device according to
7034 * some platform parameters or the HW capability register. It prepares the
7035  * driver to use either ring or chain mode and to set up either enhanced or
7036 * normal descriptors.
7037 */
7038 static int stmmac_hw_init(struct stmmac_priv *priv)
7039 {
7040 int ret;
7041
7042 	/* dwmac-sun8i only works in chain mode */
7043 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7044 chain_mode = 1;
7045 priv->chain_mode = chain_mode;
7046
7047 /* Initialize HW Interface */
7048 ret = stmmac_hwif_init(priv);
7049 if (ret)
7050 return ret;
7051
7052 	/* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
7053 priv->hw_cap_support = stmmac_get_hw_features(priv);
7054 if (priv->hw_cap_support) {
7055 dev_info(priv->device, "DMA HW capability register supported\n");
7056
7057 		/* Some gmac/dma configuration fields passed through the
7058 		 * platform (e.g. enh_desc, tx_coe) can be overridden with
7059 		 * the values from the HW capability register
7060 		 * (if supported).
7061 */
7062 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7063 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7064 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7065 priv->hw->pmt = priv->plat->pmt;
7066 if (priv->dma_cap.hash_tb_sz) {
7067 priv->hw->multicast_filter_bins =
7068 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7069 priv->hw->mcast_bits_log2 =
7070 ilog2(priv->hw->multicast_filter_bins);
7071 }
7072
7073 		/* TX COE doesn't work in threshold DMA mode */
7074 if (priv->plat->force_thresh_dma_mode)
7075 priv->plat->tx_coe = 0;
7076 else
7077 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7078
7079 /* In case of GMAC4 rx_coe is from HW cap register. */
7080 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7081
7082 if (priv->dma_cap.rx_coe_type2)
7083 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7084 else if (priv->dma_cap.rx_coe_type1)
7085 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7086
7087 } else {
7088 dev_info(priv->device, "No HW DMA feature register supported\n");
7089 }
7090
7091 if (priv->plat->rx_coe) {
7092 priv->hw->rx_csum = priv->plat->rx_coe;
7093 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7094 if (priv->synopsys_id < DWMAC_CORE_4_00)
7095 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7096 }
7097 if (priv->plat->tx_coe)
7098 dev_info(priv->device, "TX Checksum insertion supported\n");
7099
7100 if (priv->plat->pmt) {
7101 dev_info(priv->device, "Wake-Up On Lan supported\n");
7102 device_set_wakeup_capable(priv->device, 1);
7103 }
7104
7105 if (priv->dma_cap.tsoen)
7106 dev_info(priv->device, "TSO supported\n");
7107
7108 if (priv->dma_cap.number_rx_queues &&
7109 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7110 dev_warn(priv->device,
7111 "Number of Rx queues (%u) exceeds dma capability\n",
7112 priv->plat->rx_queues_to_use);
7113 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7114 }
7115 if (priv->dma_cap.number_tx_queues &&
7116 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7117 dev_warn(priv->device,
7118 "Number of Tx queues (%u) exceeds dma capability\n",
7119 priv->plat->tx_queues_to_use);
7120 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7121 }
7122
7123 if (priv->dma_cap.rx_fifo_size &&
7124 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7125 dev_warn(priv->device,
7126 "Rx FIFO size (%u) exceeds dma capability\n",
7127 priv->plat->rx_fifo_size);
7128 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7129 }
7130 if (priv->dma_cap.tx_fifo_size &&
7131 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7132 dev_warn(priv->device,
7133 "Tx FIFO size (%u) exceeds dma capability\n",
7134 priv->plat->tx_fifo_size);
7135 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7136 }
7137
7138 priv->hw->vlan_fail_q_en =
7139 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7140 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7141
7142 /* Run HW quirks, if any */
7143 if (priv->hwif_quirks) {
7144 ret = priv->hwif_quirks(priv);
7145 if (ret)
7146 return ret;
7147 }
7148
7149 	/* Rx Watchdog is available in cores newer than 3.40.
7150 	 * In some cases, for example on buggy HW, this feature
7151 	 * has to be disabled; this can be done by setting the
7152 	 * riwt_off field from the platform.
7153 */
7154 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7155 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7156 priv->use_riwt = 1;
7157 dev_info(priv->device,
7158 "Enable RX Mitigation via HW Watchdog Timer\n");
7159 }
7160
7161 return 0;
7162 }
7163
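/**
 * stmmac_napi_add - register the NAPI contexts for every channel
 * @dev: network device pointer
 * Description: initializes each DMA channel and registers its rx, tx and
 * rxtx NAPI instances according to the number of Rx/Tx queues in use.
 */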
7164 static void stmmac_napi_add(struct net_device *dev)
7165 {
7166 struct stmmac_priv *priv = netdev_priv(dev);
7167 u32 queue, maxq;
7168
7169 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7170
7171 for (queue = 0; queue < maxq; queue++) {
7172 struct stmmac_channel *ch = &priv->channel[queue];
7173
7174 ch->priv_data = priv;
7175 ch->index = queue;
7176 spin_lock_init(&ch->lock);
7177
7178 if (queue < priv->plat->rx_queues_to_use) {
7179 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7180 }
7181 if (queue < priv->plat->tx_queues_to_use) {
7182 netif_napi_add_tx(dev, &ch->tx_napi,
7183 stmmac_napi_poll_tx);
7184 }
7185 if (queue < priv->plat->rx_queues_to_use &&
7186 queue < priv->plat->tx_queues_to_use) {
7187 netif_napi_add(dev, &ch->rxtx_napi,
7188 stmmac_napi_poll_rxtx);
7189 }
7190 }
7191 }
7192
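/**
 * stmmac_napi_del - unregister the NAPI contexts of every channel
 * @dev: network device pointer
 */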
7193 static void stmmac_napi_del(struct net_device *dev)
7194 {
7195 struct stmmac_priv *priv = netdev_priv(dev);
7196 u32 queue, maxq;
7197
7198 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7199
7200 for (queue = 0; queue < maxq; queue++) {
7201 struct stmmac_channel *ch = &priv->channel[queue];
7202
7203 if (queue < priv->plat->rx_queues_to_use)
7204 netif_napi_del(&ch->rx_napi);
7205 if (queue < priv->plat->tx_queues_to_use)
7206 netif_napi_del(&ch->tx_napi);
7207 if (queue < priv->plat->rx_queues_to_use &&
7208 queue < priv->plat->tx_queues_to_use) {
7209 netif_napi_del(&ch->rxtx_napi);
7210 }
7211 }
7212 }
7213
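/**
 * stmmac_reinit_queues - change the number of Rx/Tx queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of Rx queues
 * @tx_cnt: new number of Tx queues
 * Description: stops the interface if it is running, re-creates the NAPI
 * contexts, refreshes the RSS table and the phylink capabilities, then
 * restarts the interface. Returns 0 or the error from stmmac_open().
 */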
7214 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7215 {
7216 struct stmmac_priv *priv = netdev_priv(dev);
7217 int ret = 0, i;
7218 int max_speed;
7219
7220 if (netif_running(dev))
7221 stmmac_release(dev);
7222
7223 stmmac_napi_del(dev);
7224
7225 priv->plat->rx_queues_to_use = rx_cnt;
7226 priv->plat->tx_queues_to_use = tx_cnt;
7227 if (!netif_is_rxfh_configured(dev))
7228 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7229 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7230 rx_cnt);
7231
7232 stmmac_mac_phylink_get_caps(priv);
7233
7234 priv->phylink_config.mac_capabilities = priv->hw->link.caps;
7235
7236 max_speed = priv->plat->max_speed;
7237 if (max_speed)
7238 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7239
7240 stmmac_napi_add(dev);
7241
7242 if (netif_running(dev))
7243 ret = stmmac_open(dev);
7244
7245 return ret;
7246 }
7247
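/**
 * stmmac_reinit_ringparam - change the DMA descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new Rx descriptor ring size
 * @tx_size: new Tx descriptor ring size
 * Description: stops the interface if it is running, updates the ring sizes
 * and restarts it. Returns 0 or the error from stmmac_open().
 */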
7248 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7249 {
7250 struct stmmac_priv *priv = netdev_priv(dev);
7251 int ret = 0;
7252
7253 if (netif_running(dev))
7254 stmmac_release(dev);
7255
7256 priv->dma_conf.dma_rx_size = rx_size;
7257 priv->dma_conf.dma_tx_size = tx_size;
7258
7259 if (netif_running(dev))
7260 ret = stmmac_open(dev);
7261
7262 return ret;
7263 }
7264
7265 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
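/**
 * stmmac_fpe_lp_task - Frame Preemption link-partner handshake worker
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local/link-partner FPE states and, once both sides
 * report ENTERING_ON, enables frame preemption in HW; otherwise it keeps
 * sending verify mPackets for a limited number of retries.
 */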
7266 static void stmmac_fpe_lp_task(struct work_struct *work)
7267 {
7268 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7269 fpe_task);
7270 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7271 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7272 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7273 bool *hs_enable = &fpe_cfg->hs_enable;
7274 bool *enable = &fpe_cfg->enable;
7275 int retries = 20;
7276
7277 while (retries-- > 0) {
7278 /* Bail out immediately if FPE handshake is OFF */
7279 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7280 break;
7281
7282 if (*lo_state == FPE_STATE_ENTERING_ON &&
7283 *lp_state == FPE_STATE_ENTERING_ON) {
7284 stmmac_fpe_configure(priv, priv->ioaddr,
7285 fpe_cfg,
7286 priv->plat->tx_queues_to_use,
7287 priv->plat->rx_queues_to_use,
7288 *enable);
7289
7290 netdev_info(priv->dev, "configured FPE\n");
7291
7292 *lo_state = FPE_STATE_ON;
7293 *lp_state = FPE_STATE_ON;
7294 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7295 break;
7296 }
7297
7298 if ((*lo_state == FPE_STATE_CAPABLE ||
7299 *lo_state == FPE_STATE_ENTERING_ON) &&
7300 *lp_state != FPE_STATE_ON) {
7301 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7302 *lo_state, *lp_state);
7303 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7304 fpe_cfg,
7305 MPACKET_VERIFY);
7306 }
7307 /* Sleep then retry */
7308 msleep(500);
7309 }
7310
7311 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7312 }
7313
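/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to abort it
 */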
7314 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7315 {
7316 if (priv->plat->fpe_cfg->hs_enable != enable) {
7317 if (enable) {
7318 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7319 priv->plat->fpe_cfg,
7320 MPACKET_VERIFY);
7321 } else {
7322 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7323 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7324 }
7325
7326 priv->plat->fpe_cfg->hs_enable = enable;
7327 }
7328 }
7329
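/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook to read the Rx timestamp
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: where to store the hardware timestamp
 * Description: returns 0 and the CDC-corrected timestamp if hardware Rx
 * timestamping is enabled and a valid timestamp is available, -ENODATA
 * otherwise.
 */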
7330 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7331 {
7332 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7333 struct dma_desc *desc_contains_ts = ctx->desc;
7334 struct stmmac_priv *priv = ctx->priv;
7335 struct dma_desc *ndesc = ctx->ndesc;
7336 struct dma_desc *desc = ctx->desc;
7337 u64 ns = 0;
7338
7339 if (!priv->hwts_rx_en)
7340 return -ENODATA;
7341
7342 	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7343 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7344 desc_contains_ts = ndesc;
7345
7346 /* Check if timestamp is available */
7347 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7348 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7349 ns -= priv->plat->cdc_error_adj;
7350 *timestamp = ns_to_ktime(ns);
7351 return 0;
7352 }
7353
7354 return -ENODATA;
7355 }
7356
7357 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7358 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7359 };
7360
7361 /**
7362 * stmmac_dvr_probe
7363 * @device: device pointer
7364 * @plat_dat: platform data pointer
7365 * @res: stmmac resource pointer
7366  * Description: this is the main probe function, used to allocate the
7367  * network device (alloc_etherdev) and the private structure, and to
7368  * initialize the HW.
7369  * Return: 0 on success, a negative errno otherwise.
7370 */
7371 int stmmac_dvr_probe(struct device *device,
7372 struct plat_stmmacenet_data *plat_dat,
7373 struct stmmac_resources *res)
7374 {
7375 struct net_device *ndev = NULL;
7376 struct stmmac_priv *priv;
7377 u32 rxq;
7378 int i, ret = 0;
7379
7380 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7381 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7382 if (!ndev)
7383 return -ENOMEM;
7384
7385 SET_NETDEV_DEV(ndev, device);
7386
7387 priv = netdev_priv(ndev);
7388 priv->device = device;
7389 priv->dev = ndev;
7390
7391 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7392 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7393 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7394 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7395 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7396 }
7397
7398 priv->xstats.pcpu_stats =
7399 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7400 if (!priv->xstats.pcpu_stats)
7401 return -ENOMEM;
7402
7403 stmmac_set_ethtool_ops(ndev);
7404 priv->pause = pause;
7405 priv->plat = plat_dat;
7406 priv->ioaddr = res->addr;
7407 priv->dev->base_addr = (unsigned long)res->addr;
7408 priv->plat->dma_cfg->multi_msi_en =
7409 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7410
7411 priv->dev->irq = res->irq;
7412 priv->wol_irq = res->wol_irq;
7413 priv->lpi_irq = res->lpi_irq;
7414 priv->sfty_ce_irq = res->sfty_ce_irq;
7415 priv->sfty_ue_irq = res->sfty_ue_irq;
7416 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7417 priv->rx_irq[i] = res->rx_irq[i];
7418 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7419 priv->tx_irq[i] = res->tx_irq[i];
7420
7421 if (!is_zero_ether_addr(res->mac))
7422 eth_hw_addr_set(priv->dev, res->mac);
7423
7424 dev_set_drvdata(device, priv->dev);
7425
7426 /* Verify driver arguments */
7427 stmmac_verify_args();
7428
7429 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7430 if (!priv->af_xdp_zc_qps)
7431 return -ENOMEM;
7432
7433 /* Allocate workqueue */
7434 priv->wq = create_singlethread_workqueue("stmmac_wq");
7435 if (!priv->wq) {
7436 dev_err(priv->device, "failed to create workqueue\n");
7437 ret = -ENOMEM;
7438 goto error_wq_init;
7439 }
7440
7441 INIT_WORK(&priv->service_task, stmmac_service_task);
7442
7443 /* Initialize Link Partner FPE workqueue */
7444 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7445
7446 /* Override with kernel parameters if supplied XXX CRS XXX
7447 * this needs to have multiple instances
7448 */
7449 if ((phyaddr >= 0) && (phyaddr <= 31))
7450 priv->plat->phy_addr = phyaddr;
7451
7452 if (priv->plat->stmmac_rst) {
7453 ret = reset_control_assert(priv->plat->stmmac_rst);
7454 reset_control_deassert(priv->plat->stmmac_rst);
7455 		/* Some reset controllers provide only a reset callback instead
7456 		 * of the assert + deassert callback pair.
7457 */
7458 if (ret == -ENOTSUPP)
7459 reset_control_reset(priv->plat->stmmac_rst);
7460 }
7461
7462 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7463 if (ret == -ENOTSUPP)
7464 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7465 ERR_PTR(ret));
7466
7467 /* Wait a bit for the reset to take effect */
7468 udelay(10);
7469
7470 /* Init MAC and get the capabilities */
7471 ret = stmmac_hw_init(priv);
7472 if (ret)
7473 goto error_hw_init;
7474
7475 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7476 */
7477 if (priv->synopsys_id < DWMAC_CORE_5_20)
7478 priv->plat->dma_cfg->dche = false;
7479
7480 stmmac_check_ether_addr(priv);
7481
7482 ndev->netdev_ops = &stmmac_netdev_ops;
7483
7484 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7485
7486 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7487 NETIF_F_RXCSUM;
7488 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7489 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7490
7491 ret = stmmac_tc_init(priv, priv);
7492 if (!ret) {
7493 ndev->hw_features |= NETIF_F_HW_TC;
7494 }
7495
7496 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7497 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7498 if (priv->plat->has_gmac4)
7499 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7500 priv->tso = true;
7501 dev_info(priv->device, "TSO feature enabled\n");
7502 }
7503
7504 if (priv->dma_cap.sphen &&
7505 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7506 ndev->hw_features |= NETIF_F_GRO;
7507 priv->sph_cap = true;
7508 priv->sph = priv->sph_cap;
7509 dev_info(priv->device, "SPH feature enabled\n");
7510 }
7511
7512 /* Ideally our host DMA address width is the same as for the
7513 * device. However, it may differ and then we have to use our
7514 * host DMA width for allocation and the device DMA width for
7515 * register handling.
7516 */
7517 if (priv->plat->host_dma_width)
7518 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7519 else
7520 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7521
7522 if (priv->dma_cap.host_dma_width) {
7523 ret = dma_set_mask_and_coherent(device,
7524 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7525 if (!ret) {
7526 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7527 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7528
7529 /*
7530 * If more than 32 bits can be addressed, make sure to
7531 * enable enhanced addressing mode.
7532 */
7533 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7534 priv->plat->dma_cfg->eame = true;
7535 } else {
7536 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7537 if (ret) {
7538 dev_err(priv->device, "Failed to set DMA Mask\n");
7539 goto error_hw_init;
7540 }
7541
7542 priv->dma_cap.host_dma_width = 32;
7543 }
7544 }
7545
7546 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7547 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7548 #ifdef STMMAC_VLAN_TAG_USED
7549 /* Both mac100 and gmac support receive VLAN tag detection */
7550 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7551 if (priv->dma_cap.vlhash) {
7552 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7553 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7554 }
7555 if (priv->dma_cap.vlins) {
7556 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7557 if (priv->dma_cap.dvlan)
7558 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7559 }
7560 #endif
7561 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7562
7563 priv->xstats.threshold = tc;
7564
7565 /* Initialize RSS */
7566 rxq = priv->plat->rx_queues_to_use;
7567 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7568 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7569 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7570
7571 if (priv->dma_cap.rssen && priv->plat->rss_en)
7572 ndev->features |= NETIF_F_RXHASH;
7573
7574 ndev->vlan_features |= ndev->features;
7575 /* TSO doesn't work on VLANs yet */
7576 ndev->vlan_features &= ~NETIF_F_TSO;
7577
7578 /* MTU range: 46 - hw-specific max */
7579 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7580 if (priv->plat->has_xgmac)
7581 ndev->max_mtu = XGMAC_JUMBO_LEN;
7582 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7583 ndev->max_mtu = JUMBO_LEN;
7584 else
7585 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7586 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7587 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7588 */
7589 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7590 (priv->plat->maxmtu >= ndev->min_mtu))
7591 ndev->max_mtu = priv->plat->maxmtu;
7592 else if (priv->plat->maxmtu < ndev->min_mtu)
7593 dev_warn(priv->device,
7594 "%s: warning: maxmtu having invalid value (%d)\n",
7595 __func__, priv->plat->maxmtu);
7596
7597 if (flow_ctrl)
7598 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7599
7600 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7601
7602 /* Setup channels NAPI */
7603 stmmac_napi_add(ndev);
7604
7605 mutex_init(&priv->lock);
7606
7607 	/* If a specific clk_csr value is passed from the platform,
7608 	 * the CSR Clock Range selection cannot be changed at run-time
7609 	 * and it is fixed. Otherwise the driver will try to set the
7610 	 * MDC clock dynamically according to the actual csr clock
7611 	 * input.
7612 */
7613 if (priv->plat->clk_csr >= 0)
7614 priv->clk_csr = priv->plat->clk_csr;
7615 else
7616 stmmac_clk_csr_set(priv);
7617
7618 stmmac_check_pcs_mode(priv);
7619
7620 pm_runtime_get_noresume(device);
7621 pm_runtime_set_active(device);
7622 if (!pm_runtime_enabled(device))
7623 pm_runtime_enable(device);
7624
7625 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7626 priv->hw->pcs != STMMAC_PCS_RTBI) {
7627 /* MDIO bus Registration */
7628 ret = stmmac_mdio_register(ndev);
7629 if (ret < 0) {
7630 dev_err_probe(priv->device, ret,
7631 "%s: MDIO bus (id: %d) registration failed\n",
7632 __func__, priv->plat->bus_id);
7633 goto error_mdio_register;
7634 }
7635 }
7636
7637 if (priv->plat->speed_mode_2500)
7638 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7639
7640 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7641 ret = stmmac_xpcs_setup(priv->mii);
7642 if (ret)
7643 goto error_xpcs_setup;
7644 }
7645
7646 ret = stmmac_phy_setup(priv);
7647 if (ret) {
7648 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7649 goto error_phy_setup;
7650 }
7651
7652 ret = register_netdev(ndev);
7653 if (ret) {
7654 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7655 __func__, ret);
7656 goto error_netdev_register;
7657 }
7658
7659 #ifdef CONFIG_DEBUG_FS
7660 stmmac_init_fs(ndev);
7661 #endif
7662
7663 if (priv->plat->dump_debug_regs)
7664 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7665
7666 /* Let pm_runtime_put() disable the clocks.
7667 * If CONFIG_PM is not enabled, the clocks will stay powered.
7668 */
7669 pm_runtime_put(device);
7670
7671 return ret;
7672
7673 error_netdev_register:
7674 phylink_destroy(priv->phylink);
7675 error_xpcs_setup:
7676 error_phy_setup:
7677 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7678 priv->hw->pcs != STMMAC_PCS_RTBI)
7679 stmmac_mdio_unregister(ndev);
7680 error_mdio_register:
7681 stmmac_napi_del(ndev);
7682 error_hw_init:
7683 destroy_workqueue(priv->wq);
7684 error_wq_init:
7685 bitmap_free(priv->af_xdp_zc_qps);
7686
7687 return ret;
7688 }
7689 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7690
7691 /**
7692 * stmmac_dvr_remove
7693 * @dev: device pointer
7694  * Description: this function resets the TX/RX processes, disables the MAC
7695  * RX/TX, changes the link status and releases the DMA descriptor rings.
7696 */
7697 void stmmac_dvr_remove(struct device *dev)
7698 {
7699 struct net_device *ndev = dev_get_drvdata(dev);
7700 struct stmmac_priv *priv = netdev_priv(ndev);
7701
7702 netdev_info(priv->dev, "%s: removing driver", __func__);
7703
7704 pm_runtime_get_sync(dev);
7705
7706 stmmac_stop_all_dma(priv);
7707 stmmac_mac_set(priv, priv->ioaddr, false);
7708 netif_carrier_off(ndev);
7709 unregister_netdev(ndev);
7710
7711 #ifdef CONFIG_DEBUG_FS
7712 stmmac_exit_fs(ndev);
7713 #endif
7714 phylink_destroy(priv->phylink);
7715 if (priv->plat->stmmac_rst)
7716 reset_control_assert(priv->plat->stmmac_rst);
7717 reset_control_assert(priv->plat->stmmac_ahb_rst);
7718 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7719 priv->hw->pcs != STMMAC_PCS_RTBI)
7720 stmmac_mdio_unregister(ndev);
7721 destroy_workqueue(priv->wq);
7722 mutex_destroy(&priv->lock);
7723 bitmap_free(priv->af_xdp_zc_qps);
7724
7725 pm_runtime_disable(dev);
7726 pm_runtime_put_noidle(dev);
7727 }
7728 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7729
7730 /**
7731 * stmmac_suspend - suspend callback
7732 * @dev: device pointer
7733  * Description: this function suspends the device and is called by the
7734  * platform driver to stop the network queues, program the PMT register
7735  * (for WoL) and release the driver resources.
7736 */
7737 int stmmac_suspend(struct device *dev)
7738 {
7739 struct net_device *ndev = dev_get_drvdata(dev);
7740 struct stmmac_priv *priv = netdev_priv(ndev);
7741 u32 chan;
7742
7743 if (!ndev || !netif_running(ndev))
7744 return 0;
7745
7746 mutex_lock(&priv->lock);
7747
7748 netif_device_detach(ndev);
7749
7750 stmmac_disable_all_queues(priv);
7751
7752 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7753 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7754
7755 if (priv->eee_enabled) {
7756 priv->tx_path_in_lpi_mode = false;
7757 del_timer_sync(&priv->eee_ctrl_timer);
7758 }
7759
7760 /* Stop TX/RX DMA */
7761 stmmac_stop_all_dma(priv);
7762
7763 if (priv->plat->serdes_powerdown)
7764 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7765
7766 /* Enable Power down mode by programming the PMT regs */
7767 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7768 stmmac_pmt(priv, priv->hw, priv->wolopts);
7769 priv->irq_wake = 1;
7770 } else {
7771 stmmac_mac_set(priv, priv->ioaddr, false);
7772 pinctrl_pm_select_sleep_state(priv->device);
7773 }
7774
7775 mutex_unlock(&priv->lock);
7776
7777 rtnl_lock();
7778 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7779 phylink_suspend(priv->phylink, true);
7780 } else {
7781 if (device_may_wakeup(priv->device))
7782 phylink_speed_down(priv->phylink, false);
7783 phylink_suspend(priv->phylink, false);
7784 }
7785 rtnl_unlock();
7786
7787 if (priv->dma_cap.fpesel) {
7788 /* Disable FPE */
7789 stmmac_fpe_configure(priv, priv->ioaddr,
7790 priv->plat->fpe_cfg,
7791 priv->plat->tx_queues_to_use,
7792 priv->plat->rx_queues_to_use, false);
7793
7794 stmmac_fpe_handshake(priv, false);
7795 stmmac_fpe_stop_wq(priv);
7796 }
7797
7798 priv->speed = SPEED_UNKNOWN;
7799 return 0;
7800 }
7801 EXPORT_SYMBOL_GPL(stmmac_suspend);
7802
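/**
 * stmmac_reset_rx_queue - reset the SW state of an Rx queue
 * @priv: driver private structure
 * @queue: Rx queue index
 */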
7803 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7804 {
7805 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7806
7807 rx_q->cur_rx = 0;
7808 rx_q->dirty_rx = 0;
7809 }
7810
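/**
 * stmmac_reset_tx_queue - reset the SW state of a Tx queue
 * @priv: driver private structure
 * @queue: Tx queue index
 * Description: resets the ring indices and the cached MSS value, and resets
 * the corresponding BQL queue.
 */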
7811 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7812 {
7813 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7814
7815 tx_q->cur_tx = 0;
7816 tx_q->dirty_tx = 0;
7817 tx_q->mss = 0;
7818
7819 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7820 }
7821
7822 /**
7823 * stmmac_reset_queues_param - reset queue parameters
7824 * @priv: device pointer
7825 */
7826 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7827 {
7828 u32 rx_cnt = priv->plat->rx_queues_to_use;
7829 u32 tx_cnt = priv->plat->tx_queues_to_use;
7830 u32 queue;
7831
7832 for (queue = 0; queue < rx_cnt; queue++)
7833 stmmac_reset_rx_queue(priv, queue);
7834
7835 for (queue = 0; queue < tx_cnt; queue++)
7836 stmmac_reset_tx_queue(priv, queue);
7837 }
7838
7839 /**
7840 * stmmac_resume - resume callback
7841 * @dev: device pointer
7842  * Description: on resume this function is invoked to set up the DMA and the
7843  * MAC core in a usable state.
7844 */
7845 int stmmac_resume(struct device *dev)
7846 {
7847 struct net_device *ndev = dev_get_drvdata(dev);
7848 struct stmmac_priv *priv = netdev_priv(ndev);
7849 int ret;
7850
7851 if (!netif_running(ndev))
7852 return 0;
7853
7854 	/* The Power Down bit in the PMT register is cleared
7855 	 * automatically as soon as a magic packet or a Wake-up frame
7856 	 * is received. It is still better to clear this bit manually
7857 	 * because it can generate problems while resuming from
7858 	 * other devices (e.g. a serial console).
7859 */
7860 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7861 mutex_lock(&priv->lock);
7862 stmmac_pmt(priv, priv->hw, 0);
7863 mutex_unlock(&priv->lock);
7864 priv->irq_wake = 0;
7865 } else {
7866 pinctrl_pm_select_default_state(priv->device);
7867 /* reset the phy so that it's ready */
7868 if (priv->mii)
7869 stmmac_mdio_reset(priv->mii);
7870 }
7871
7872 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7873 priv->plat->serdes_powerup) {
7874 ret = priv->plat->serdes_powerup(ndev,
7875 priv->plat->bsp_priv);
7876
7877 if (ret < 0)
7878 return ret;
7879 }
7880
7881 rtnl_lock();
7882 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7883 phylink_resume(priv->phylink);
7884 } else {
7885 phylink_resume(priv->phylink);
7886 if (device_may_wakeup(priv->device))
7887 phylink_speed_up(priv->phylink);
7888 }
7889 rtnl_unlock();
7890
7891 rtnl_lock();
7892 mutex_lock(&priv->lock);
7893
7894 stmmac_reset_queues_param(priv);
7895
7896 stmmac_free_tx_skbufs(priv);
7897 stmmac_clear_descriptors(priv, &priv->dma_conf);
7898
7899 stmmac_hw_setup(ndev, false);
7900 stmmac_init_coalesce(priv);
7901 stmmac_set_rx_mode(ndev);
7902
7903 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7904
7905 stmmac_enable_all_queues(priv);
7906 stmmac_enable_all_dma_irq(priv);
7907
7908 mutex_unlock(&priv->lock);
7909 rtnl_unlock();
7910
7911 netif_device_attach(ndev);
7912
7913 return 0;
7914 }
7915 EXPORT_SYMBOL_GPL(stmmac_resume);
7916
7917 #ifndef MODULE
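/**
 * stmmac_cmdline_opt - parse the "stmmaceth=" kernel command line options
 * @str: comma separated list of key:value options
 * Description: built-in (non-module) equivalent of the module parameters;
 * supported keys are debug, phyaddr, buf_sz, tc, watchdog, flow_ctrl,
 * pause, eee_timer and chain_mode.
 */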
7918 static int __init stmmac_cmdline_opt(char *str)
7919 {
7920 char *opt;
7921
7922 if (!str || !*str)
7923 return 1;
7924 while ((opt = strsep(&str, ",")) != NULL) {
7925 if (!strncmp(opt, "debug:", 6)) {
7926 if (kstrtoint(opt + 6, 0, &debug))
7927 goto err;
7928 } else if (!strncmp(opt, "phyaddr:", 8)) {
7929 if (kstrtoint(opt + 8, 0, &phyaddr))
7930 goto err;
7931 } else if (!strncmp(opt, "buf_sz:", 7)) {
7932 if (kstrtoint(opt + 7, 0, &buf_sz))
7933 goto err;
7934 } else if (!strncmp(opt, "tc:", 3)) {
7935 if (kstrtoint(opt + 3, 0, &tc))
7936 goto err;
7937 } else if (!strncmp(opt, "watchdog:", 9)) {
7938 if (kstrtoint(opt + 9, 0, &watchdog))
7939 goto err;
7940 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7941 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7942 goto err;
7943 } else if (!strncmp(opt, "pause:", 6)) {
7944 if (kstrtoint(opt + 6, 0, &pause))
7945 goto err;
7946 } else if (!strncmp(opt, "eee_timer:", 10)) {
7947 if (kstrtoint(opt + 10, 0, &eee_timer))
7948 goto err;
7949 } else if (!strncmp(opt, "chain_mode:", 11)) {
7950 if (kstrtoint(opt + 11, 0, &chain_mode))
7951 goto err;
7952 }
7953 }
7954 return 1;
7955
7956 err:
7957 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7958 return 1;
7959 }
7960
7961 __setup("stmmaceth=", stmmac_cmdline_opt);
7962 #endif /* MODULE */
7963
7964 static int __init stmmac_init(void)
7965 {
7966 #ifdef CONFIG_DEBUG_FS
7967 /* Create debugfs main directory if it doesn't exist yet */
7968 if (!stmmac_fs_dir)
7969 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7970 register_netdevice_notifier(&stmmac_notifier);
7971 #endif
7972
7973 return 0;
7974 }
7975
7976 static void __exit stmmac_exit(void)
7977 {
7978 #ifdef CONFIG_DEBUG_FS
7979 unregister_netdevice_notifier(&stmmac_notifier);
7980 debugfs_remove_recursive(stmmac_fs_dir);
7981 #endif
7982 }
7983
7984 module_init(stmmac_init)
7985 module_exit(stmmac_exit)
7986
7987 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7988 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7989 MODULE_LICENSE("GPL");
7990