/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"

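/* ECC accounting: SEC (single error correct) events are single-bit errors
 * that the hardware corrected on its own; DED (double error detect) events
 * are uncorrectable.  Each count below is evaluated over a window of the
 * corresponding ecc_*_period seconds before action is taken.
 */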
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}

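/* ring->cur and ring->dirty are free-running unsigned indices: cur advances
 * as descriptors are queued, dirty as they are reclaimed.  The unsigned
 * subtraction below yields the correct in-flight count even after the
 * indices wrap around.
 */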
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

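/* Size the Rx buffer for an MTU-sized frame plus Ethernet, FCS and VLAN
 * overhead.  For example, a 1500-byte MTU yields 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, which is then clamped to
 * the supported range and rounded up to the next XGBE_RX_BUF_ALIGN
 * boundary.
 */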
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

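/* Count an ECC event against a sliding window: while jiffies is inside the
 * current window the counter is incremented, otherwise a new window is
 * started with this event as its first.  Returns true once the threshold
 * for the window is exceeded, so the caller can react (disable further
 * SEC reporting, or stop the device on DED errors).
 */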
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}

static void xgbe_ecc_isr_task(struct tasklet_struct *t)
{
	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_ecc);
	else
		xgbe_ecc_isr_task(&pdata->tasklet_ecc);

	return IRQ_HANDLED;
}

static void xgbe_isr_task(struct tasklet_struct *t)
{
	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_task(&pdata->tasklet_ecc);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_dev);
	else
		xgbe_isr_task(&pdata->tasklet_dev);

	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}

static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq) {
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		} else {
			xgbe_disable_rx_tx_ints(pdata);
		}

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		/* Deactivate the Tx timer */
		del_timer_sync(&channel->tx_timer);
		channel->tx_timer_active = 0;
	}
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
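	/* Example: an encoded FIFO size of 7 becomes 1 << (7 + 7) = 16384
	 * bytes.
	 */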

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, " 1GbE support : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, " MDIO interface : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, " Magic packet support : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, " Management counters : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, " ARP offload : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, " TX checksum offload : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " RX checksum offload : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, " Timestamp source : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, " RX fifo size : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, " TX fifo size : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, " DMA width : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, " Split header : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, " Debug memory interface : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, " Traffic Class count : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, " Hash table size : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, " RX queue count : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, " TX queue count : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
			hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
			hw_feat->tx_ch_cnt);
		dev_dbg(pdata->dev, " PPS outputs : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}

static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
			       unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->vxlan_port = be16_to_cpu(ti->port);
	pdata->hw_if.enable_vxlan(pdata);

	return 0;
}

static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
				 unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->hw_if.disable_vxlan(pdata);
	pdata->vxlan_port = 0;

	return 0;
}

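/* Only a single VXLAN port table entry is exposed (n_entries = 1), and
 * UDP_TUNNEL_NIC_INFO_OPEN_ONLY defers port programming until the device
 * is open; xgbe_start() triggers the (re)programming via
 * udp_tunnel_nic_reset_ntf().
 */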
static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
	.set_port	= xgbe_vxlan_set_port,
	.unset_port	= xgbe_vxlan_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
{
	return &xgbe_udp_tunnels;
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
	tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

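/* Free the device IRQ before killing the tasklets so that a concurrent
 * hard interrupt cannot re-schedule a tasklet after tasklet_kill() has
 * returned.
 */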
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	tasklet_kill(&pdata->tasklet_dev);
	tasklet_kill(&pdata->tasklet_ecc);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);
}

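/* (Re)allocate channel and ring state.  Any pending ring-count change
 * requested via ethtool (new_tx_ring_count/new_rx_ring_count) is committed
 * here before the channels and descriptor rings are allocated.
 */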
static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	if (pdata->new_tx_ring_count) {
		pdata->tx_ring_count = pdata->new_tx_ring_count;
		pdata->tx_q_count = pdata->tx_ring_count;
		pdata->new_tx_ring_count = 0;
	}

	if (pdata->new_rx_ring_count) {
		pdata->rx_ring_count = pdata->new_rx_ring_count;
		pdata->new_rx_ring_count = 0;
	}

	/* Calculate the Rx buffer size before allocating rings */
	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		return ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the service and Tx timers */
	xgbe_init_timers(pdata);

	return 0;

err_channels:
	xgbe_free_memory(pdata);

	return ret;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	/* Set the number of queues */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real rx queue count\n");
		return ret;
	}

	/* Set RSS lookup table data for programming */
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_nic_reset_ntf(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(pdata->netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_vxlan_unset_port(netdev, 0, 0, NULL);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

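/* A full restart additionally tears down and reallocates the channel and
 * ring memory (for example, after a ring-count change); xgbe_restart_dev()
 * below only unmaps and remaps the existing descriptor data.
 */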
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_memory(pdata);
	xgbe_alloc_memory(pdata);

	xgbe_start(pdata);
}

void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
		packet->header_len = skb_inner_transport_offset(skb) +
				     inner_tcp_hdrlen(skb);
		packet->tcp_header_len = inner_tcp_hdrlen(skb);
	} else {
		packet->header_len = skb_transport_offset(skb) +
				     tcp_hdrlen(skb);
		packet->tcp_header_len = tcp_hdrlen(skb);
	}
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

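/* Treat a frame as VXLAN-encapsulated only if it is a UDP packet whose
 * inner protocol is Ethernet (ETH_P_TEB) and whose inner MAC header sits
 * exactly one UDP header plus one VXLAN header beyond the outer transport
 * header.
 */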
xgbe_is_vxlan(struct sk_buff * skb)1707 static bool xgbe_is_vxlan(struct sk_buff *skb)
1708 {
1709 if (!skb->encapsulation)
1710 return false;
1711
1712 if (skb->ip_summed != CHECKSUM_PARTIAL)
1713 return false;
1714
1715 switch (skb->protocol) {
1716 case htons(ETH_P_IP):
1717 if (ip_hdr(skb)->protocol != IPPROTO_UDP)
1718 return false;
1719 break;
1720
1721 case htons(ETH_P_IPV6):
1722 if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
1723 return false;
1724 break;
1725
1726 default:
1727 return false;
1728 }
1729
1730 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1731 skb->inner_protocol != htons(ETH_P_TEB) ||
1732 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1733 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
1734 return false;
1735
1736 return true;
1737 }
1738
static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

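/* Pre-compute everything needed to build the Tx descriptors for an skb:
 * the attribute flags (TSO, checksum, VXLAN, VLAN tag, PTP timestamp) and
 * the number of ring descriptors required, including any context
 * descriptor and one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of the
 * linear data and of each fragment.
 */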
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

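/* ndo_open handler: build the per-netdev resource set in dependency order
 * (interrupt names, workqueues, PHY reset, clocks, work items, ring and
 * channel memory) and then start the device, unwinding everything in
 * reverse on any failure.
 */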
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_alloc_memory(pdata);
	if (ret)
		goto err_ptpclk;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_mem;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;

err_mem:
	xgbe_free_memory(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* Stop the device */
	xgbe_stop(pdata);

	xgbe_free_memory(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;
}

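/* ndo_start_xmit handler: compute the descriptor requirements for the skb,
 * make sure the ring has room (stopping the queue if not), prepare TSO,
 * VLAN and timestamp state, map the skb for DMA and hand the descriptors
 * to the hardware. The queue is also stopped pre-emptively if a
 * maximum-sized packet might not fit next time.
 */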
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	netdev_tx_t ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

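/* ndo_change_mtu handler: validate the new MTU by recomputing the Rx
 * buffer size it requires, then restart the device so the rings are
 * rebuilt with buffers of that size.
 */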
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

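/* ndo_setup_tc handler: accept an mqprio request, bound the requested
 * number of traffic classes by what the hardware advertises, and program
 * the new traffic class count into the hardware.
 */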
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}

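/* ndo_fix_features handler: the VXLAN offloads are interdependent on this
 * hardware, so force a consistent combination: tunnel checksum offload
 * requires tunnel segmentation, Tx and Rx tunnel offload go together, and
 * tunnel checksumming must track the plain IP checksum offloads.
 */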
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;

	if (!pdata->hw_feat.vxn)
		return features;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	return features;
}

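/* ndo_set_features handler: compare the requested feature set against the
 * currently programmed one and enable or disable RSS, Rx checksum offload,
 * VLAN stripping and VLAN filtering in hardware as needed.
 */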
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_fix_features	= xgbe_fix_features,
	.ndo_set_features	= xgbe_set_features,
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_features_check	= xgbe_features_check,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}

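/* Re-arm the Rx descriptors that have been consumed: unmap and re-map a
 * fresh buffer for each dirty entry, reset the descriptor, and finally
 * advance the Rx tail pointer register so the hardware can use the
 * refreshed entries.
 */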
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

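/* Allocate an skb from the NAPI cache sized for the header buffer, then
 * sync the DMA header buffer and copy its contents (the header, or the
 * header plus data for small packets) into the skb's linear area.
 */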
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}

static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}

static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}

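/* Reclaim completed Tx descriptors: walk the ring from the dirty index,
 * stop at the first descriptor still owned by the hardware, unmap and
 * reset each completed entry, report the completed packets and bytes to
 * the stack, and wake the queue if it was stopped and enough descriptors
 * are free again.
 */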
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

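/* NAPI Rx processing: read completed descriptors up to the budget,
 * assembling each packet from its header and data buffers (a packet may
 * span several descriptors and loop iterations) and applying checksum,
 * VLAN, timestamp and RSS metadata before handing it to GRO. If the
 * budget runs out mid-packet the partial state is saved in the ring data
 * and restored on the next poll.
 */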
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (buf2_len > rdata->rx.buf.dma_len) {
				/* Hardware inconsistency within the descriptors
				 * that has resulted in a length underflow.
				 */
				error = 1;
				goto skip_data;
			}

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb || error) {
			dev_kfree_skb(skb);
			goto next_packet;
		}

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

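/* NAPI poll handler for per-channel interrupts: clean the channel's Tx
 * ring, process its Rx ring against the budget, and re-enable the
 * channel's interrupts once all pending work has been completed.
 */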
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

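/* NAPI poll handler for the shared-interrupt case: divide the budget
 * across the Rx rings and loop over every channel, cleaning Tx and
 * processing Rx, until either the budget is exhausted or a full pass
 * makes no further progress.
 */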
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}