1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xilinx Axi Ethernet device driver
4  *
5  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
6  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
7  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9  * Copyright (c) 2010 - 2011 PetaLogix
10  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12  *
13  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14  * and Spartan6.
15  *
16  * TODO:
17  *  - Add Axi Fifo support.
18  *  - Factor out Axi DMA code into separate driver.
19  *  - Test and fix basic multicast filtering.
20  *  - Add support for extended multicast filtering.
21  *  - Test basic VLAN support.
22  *  - Add support for extended VLAN support.
23  */
24 
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46 
47 #include "xilinx_axienet.h"
48 
49 /* Descriptor defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT		128
51 #define RX_BD_NUM_DEFAULT		1024
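/* A maximally fragmented skb needs one BD for the linear part plus one per
 * fragment, hence the MAX_SKB_FRAGS + 1 minimum below.
 */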
52 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX			4096
54 #define RX_BD_NUM_MAX			4096
55 #define DMA_NUM_APP_WORDS		5
56 #define LEN_APP				4
57 #define RX_BUF_NUM_DEFAULT		128
58 
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME		"xaxienet"
61 #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION		"1.00a"
63 
64 #define AXIENET_REGS_N		40
65 
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67 
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
71 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
72 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
73 	{},
74 };
75 
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77 
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 	/* Turn on jumbo packet support for both Rx and Tx */
81 	{
82 		.opt = XAE_OPTION_JUMBO,
83 		.reg = XAE_TC_OFFSET,
84 		.m_or = XAE_TC_JUM_MASK,
85 	}, {
86 		.opt = XAE_OPTION_JUMBO,
87 		.reg = XAE_RCW1_OFFSET,
88 		.m_or = XAE_RCW1_JUM_MASK,
89 	}, { /* Turn on VLAN packet support for both Rx and Tx */
90 		.opt = XAE_OPTION_VLAN,
91 		.reg = XAE_TC_OFFSET,
92 		.m_or = XAE_TC_VLAN_MASK,
93 	}, {
94 		.opt = XAE_OPTION_VLAN,
95 		.reg = XAE_RCW1_OFFSET,
96 		.m_or = XAE_RCW1_VLAN_MASK,
97 	}, { /* Turn on FCS stripping on receive packets */
98 		.opt = XAE_OPTION_FCS_STRIP,
99 		.reg = XAE_RCW1_OFFSET,
100 		.m_or = XAE_RCW1_FCS_MASK,
101 	}, { /* Turn on FCS insertion on transmit packets */
102 		.opt = XAE_OPTION_FCS_INSERT,
103 		.reg = XAE_TC_OFFSET,
104 		.m_or = XAE_TC_FCS_MASK,
105 	}, { /* Turn off length/type field checking on receive packets */
106 		.opt = XAE_OPTION_LENTYPE_ERR,
107 		.reg = XAE_RCW1_OFFSET,
108 		.m_or = XAE_RCW1_LT_DIS_MASK,
109 	}, { /* Turn on Rx flow control */
110 		.opt = XAE_OPTION_FLOW_CONTROL,
111 		.reg = XAE_FCC_OFFSET,
112 		.m_or = XAE_FCC_FCRX_MASK,
113 	}, { /* Turn on Tx flow control */
114 		.opt = XAE_OPTION_FLOW_CONTROL,
115 		.reg = XAE_FCC_OFFSET,
116 		.m_or = XAE_FCC_FCTX_MASK,
117 	}, { /* Turn on promiscuous frame filtering */
118 		.opt = XAE_OPTION_PROMISC,
119 		.reg = XAE_FMI_OFFSET,
120 		.m_or = XAE_FMI_PM_MASK,
121 	}, { /* Enable transmitter */
122 		.opt = XAE_OPTION_TXEN,
123 		.reg = XAE_TC_OFFSET,
124 		.m_or = XAE_TC_TX_MASK,
125 	}, { /* Enable receiver */
126 		.opt = XAE_OPTION_RXEN,
127 		.reg = XAE_RCW1_OFFSET,
128 		.m_or = XAE_RCW1_RX_MASK,
129 	},
130 	{}
131 };
132 
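/* Both skb rings are sized to a power of two (RX_BUF_NUM_DEFAULT and
 * TX_BD_NUM_MAX), so masking with (size - 1) below is a cheap modulo that
 * lets the free-running head/tail indices wrap naturally.
 */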
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137 
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142 
143 /**
144  * axienet_dma_in32 - Memory mapped Axi DMA register read
145  * @lp:		Pointer to axienet local structure
146  * @reg:	Address offset from the base address of the Axi DMA core
147  *
148  * Return: The contents of the Axi DMA register
149  *
150  * This function returns the contents of the corresponding Axi DMA register.
151  */
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 	return ioread32(lp->dma_regs + reg);
155 }
156 
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 			       struct axidma_bd *desc)
159 {
160 	desc->phys = lower_32_bits(addr);
161 	if (lp->features & XAE_FEATURE_DMA_64BIT)
162 		desc->phys_msb = upper_32_bits(addr);
163 }
164 
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 				     struct axidma_bd *desc)
167 {
168 	dma_addr_t ret = desc->phys;
169 
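	/* Recombine the MSBs in two 16-bit shifts: a single shift by 32 would
	 * be undefined when dma_addr_t is only 32 bits wide.
	 */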
170 	if (lp->features & XAE_FEATURE_DMA_64BIT)
171 		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172 
173 	return ret;
174 }
175 
176 /**
177  * axienet_dma_bd_release - Release buffer descriptor rings
178  * @ndev:	Pointer to the net_device structure
179  *
180  * This function is used to release the descriptors allocated in
181  * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182  * driver stop API is called.
183  */
184 static void axienet_dma_bd_release(struct net_device *ndev)
185 {
186 	int i;
187 	struct axienet_local *lp = netdev_priv(ndev);
188 
189 	/* If we end up here, tx_bd_v must have been DMA allocated. */
190 	dma_free_coherent(lp->dev,
191 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 			  lp->tx_bd_v,
193 			  lp->tx_bd_p);
194 
195 	if (!lp->rx_bd_v)
196 		return;
197 
198 	for (i = 0; i < lp->rx_bd_num; i++) {
199 		dma_addr_t phys;
200 
201 		/* A NULL skb means this descriptor has not been initialised
202 		 * at all.
203 		 */
204 		if (!lp->rx_bd_v[i].skb)
205 			break;
206 
207 		dev_kfree_skb(lp->rx_bd_v[i].skb);
208 
209 		/* For each descriptor, we programmed cntrl with the (non-zero)
210 		 * descriptor size, after it had been successfully allocated.
211 		 * So a non-zero value in there means we need to unmap it.
212 		 */
213 		if (lp->rx_bd_v[i].cntrl) {
214 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 			dma_unmap_single(lp->dev, phys,
216 					 lp->max_frm_size, DMA_FROM_DEVICE);
217 		}
218 	}
219 
220 	dma_free_coherent(lp->dev,
221 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 			  lp->rx_bd_v,
223 			  lp->rx_bd_p);
224 }
225 
226 /**
227  * axienet_usec_to_timer - Calculate IRQ delay timer value
228  * @lp:		Pointer to the axienet_local structure
229  * @coalesce_usec: Microseconds to convert into timer value
230  */
231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
232 {
233 	u32 result;
234 	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
235 
236 	if (lp->axi_clk)
237 		clk_rate = clk_get_rate(lp->axi_clk);
238 
239 	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
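	/* e.g. 50 us on a 100 MHz SG clock: 50 * 100000000 / 125000000 = 40.
	 * The result is clamped to 255 to fit the 8-bit delay timer field.
	 */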
240 	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
241 					 (u64)125000000);
242 	if (result > 255)
243 		result = 255;
244 
245 	return result;
246 }
247 
248 /**
249  * axienet_dma_start - Set up DMA registers and start DMA operation
250  * @lp:		Pointer to the axienet_local structure
251  */
252 static void axienet_dma_start(struct axienet_local *lp)
253 {
254 	/* Start updating the Rx channel control register */
255 	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
256 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
257 	/* Only set interrupt delay timer if not generating an interrupt on
258 	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
259 	 */
260 	if (lp->coalesce_count_rx > 1)
261 		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
262 					<< XAXIDMA_DELAY_SHIFT) |
263 				 XAXIDMA_IRQ_DELAY_MASK;
264 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
265 
266 	/* Start updating the Tx channel control register */
267 	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
268 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
269 	/* Only set interrupt delay timer if not generating an interrupt on
270 	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
271 	 */
272 	if (lp->coalesce_count_tx > 1)
273 		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
274 					<< XAXIDMA_DELAY_SHIFT) |
275 				 XAXIDMA_IRQ_DELAY_MASK;
276 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
277 
278 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
279 	 * halted state. This will make the Rx side ready for reception.
280 	 */
281 	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
282 	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
283 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
284 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
285 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
286 
287 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
288 	 * Tx channel is now ready to run. But only after we write to the
289 	 * tail pointer register that the Tx channel will start transmitting.
290 	 */
291 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
292 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
293 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
294 }
295 
296 /**
297  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
298  * @ndev:	Pointer to the net_device structure
299  *
300  * Return: 0 on success; -ENOMEM on failure
301  *
302  * This function is called to initialize the Rx and Tx DMA descriptor
303  * rings. This initializes the descriptors with required default values
304  * and is called when Axi Ethernet driver reset is called.
305  */
306 static int axienet_dma_bd_init(struct net_device *ndev)
307 {
308 	int i;
309 	struct sk_buff *skb;
310 	struct axienet_local *lp = netdev_priv(ndev);
311 
312 	/* Reset the indexes which are used for accessing the BDs */
313 	lp->tx_bd_ci = 0;
314 	lp->tx_bd_tail = 0;
315 	lp->rx_bd_ci = 0;
316 
317 	/* Allocate the Tx and Rx buffer descriptors. */
318 	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
319 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
320 					 &lp->tx_bd_p, GFP_KERNEL);
321 	if (!lp->tx_bd_v)
322 		return -ENOMEM;
323 
324 	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
325 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
326 					 &lp->rx_bd_p, GFP_KERNEL);
327 	if (!lp->rx_bd_v)
328 		goto out;
329 
330 	for (i = 0; i < lp->tx_bd_num; i++) {
331 		dma_addr_t addr = lp->tx_bd_p +
332 				  sizeof(*lp->tx_bd_v) *
333 				  ((i + 1) % lp->tx_bd_num);
334 
335 		lp->tx_bd_v[i].next = lower_32_bits(addr);
336 		if (lp->features & XAE_FEATURE_DMA_64BIT)
337 			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
338 	}
339 
340 	for (i = 0; i < lp->rx_bd_num; i++) {
341 		dma_addr_t addr;
342 
343 		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
344 			((i + 1) % lp->rx_bd_num);
345 		lp->rx_bd_v[i].next = lower_32_bits(addr);
346 		if (lp->features & XAE_FEATURE_DMA_64BIT)
347 			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
348 
349 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
350 		if (!skb)
351 			goto out;
352 
353 		lp->rx_bd_v[i].skb = skb;
354 		addr = dma_map_single(lp->dev, skb->data,
355 				      lp->max_frm_size, DMA_FROM_DEVICE);
356 		if (dma_mapping_error(lp->dev, addr)) {
357 			netdev_err(ndev, "DMA mapping error\n");
358 			goto out;
359 		}
360 		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
361 
362 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
363 	}
364 
365 	axienet_dma_start(lp);
366 
367 	return 0;
368 out:
369 	axienet_dma_bd_release(ndev);
370 	return -ENOMEM;
371 }
372 
373 /**
374  * axienet_set_mac_address - Write the MAC address
375  * @ndev:	Pointer to the net_device structure
376  * @address:	6 byte Address to be written as MAC address
377  *
378  * This function is called to initialize the MAC address of the Axi Ethernet
379  * core. It writes to the UAW0 and UAW1 registers of the core.
380  */
381 static void axienet_set_mac_address(struct net_device *ndev,
382 				    const void *address)
383 {
384 	struct axienet_local *lp = netdev_priv(ndev);
385 
386 	if (address)
387 		eth_hw_addr_set(ndev, address);
388 	if (!is_valid_ether_addr(ndev->dev_addr))
389 		eth_hw_addr_random(ndev);
390 
391 	/* Set up the unicast MAC address filter with the MAC address */
392 	axienet_iow(lp, XAE_UAW0_OFFSET,
393 		    (ndev->dev_addr[0]) |
394 		    (ndev->dev_addr[1] << 8) |
395 		    (ndev->dev_addr[2] << 16) |
396 		    (ndev->dev_addr[3] << 24));
397 	axienet_iow(lp, XAE_UAW1_OFFSET,
398 		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
399 		      ~XAE_UAW1_UNICASTADDR_MASK) |
400 		     (ndev->dev_addr[4] |
401 		     (ndev->dev_addr[5] << 8))));
402 }
403 
404 /**
405  * netdev_set_mac_address - Write the MAC address (from outside the driver)
406  * @ndev:	Pointer to the net_device structure
407  * @p:		6 byte Address to be written as MAC address
408  *
409  * Return: 0 for all conditions. Presently, there is no failure case.
410  *
411  * This function is called to initialize the MAC address of the Axi Ethernet
412  * core. It calls the core specific axienet_set_mac_address. This is the
413  * function that goes into net_device_ops structure entry ndo_set_mac_address.
414  */
415 static int netdev_set_mac_address(struct net_device *ndev, void *p)
416 {
417 	struct sockaddr *addr = p;
418 
419 	axienet_set_mac_address(ndev, addr->sa_data);
420 	return 0;
421 }
422 
423 /**
424  * axienet_set_multicast_list - Prepare the multicast table
425  * @ndev:	Pointer to the net_device structure
426  *
427  * This function is called to initialize the multicast table during
428  * initialization. The Axi Ethernet basic multicast support has a four-entry
429  * multicast table which is initialized here. Additionally this function
430  * goes into the net_device_ops structure entry ndo_set_rx_mode. This
431  * means whenever the multicast table entries need to be updated this
432  * function gets called.
433  */
434 static void axienet_set_multicast_list(struct net_device *ndev)
435 {
436 	int i = 0;
437 	u32 reg, af0reg, af1reg;
438 	struct axienet_local *lp = netdev_priv(ndev);
439 
440 	reg = axienet_ior(lp, XAE_FMI_OFFSET);
441 	reg &= ~XAE_FMI_PM_MASK;
442 	if (ndev->flags & IFF_PROMISC)
443 		reg |= XAE_FMI_PM_MASK;
444 	else
445 		reg &= ~XAE_FMI_PM_MASK;
446 	axienet_iow(lp, XAE_FMI_OFFSET, reg);
447 
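	/* The low byte of FMI selects which frame filter (CAM) entry the
	 * subsequent AF/AM/FFE writes apply to, so each table slot is
	 * selected and programmed in turn below.
	 */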
448 	if (ndev->flags & IFF_ALLMULTI ||
449 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
450 		reg &= 0xFFFFFF00;
451 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
452 		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
453 		axienet_iow(lp, XAE_AF1_OFFSET, 0);
454 		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
455 		axienet_iow(lp, XAE_AM1_OFFSET, 0);
456 		axienet_iow(lp, XAE_FFE_OFFSET, 1);
457 		i = 1;
458 	} else if (!netdev_mc_empty(ndev)) {
459 		struct netdev_hw_addr *ha;
460 
461 		netdev_for_each_mc_addr(ha, ndev) {
462 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
463 				break;
464 
465 			af0reg = (ha->addr[0]);
466 			af0reg |= (ha->addr[1] << 8);
467 			af0reg |= (ha->addr[2] << 16);
468 			af0reg |= (ha->addr[3] << 24);
469 
470 			af1reg = (ha->addr[4]);
471 			af1reg |= (ha->addr[5] << 8);
472 
473 			reg &= 0xFFFFFF00;
474 			reg |= i;
475 
476 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
477 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
478 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
479 			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
480 			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
481 			axienet_iow(lp, XAE_FFE_OFFSET, 1);
482 			i++;
483 		}
484 	}
485 
486 	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
487 		reg &= 0xFFFFFF00;
488 		reg |= i;
489 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
490 		axienet_iow(lp, XAE_FFE_OFFSET, 0);
491 	}
492 }
493 
494 /**
495  * axienet_setoptions - Set an Axi Ethernet option
496  * @ndev:	Pointer to the net_device structure
497  * @options:	Option to be enabled/disabled
498  *
499  * The Axi Ethernet core has multiple features which can be selectively turned
500  * on or off. The typical options could be jumbo frame option, basic VLAN
501  * option, promiscuous mode option etc. This function is used to set or clear
502  * these options in the Axi Ethernet hardware. This is done through
503  * the axienet_option structure.
504  */
505 static void axienet_setoptions(struct net_device *ndev, u32 options)
506 {
507 	int reg;
508 	struct axienet_local *lp = netdev_priv(ndev);
509 	struct axienet_option *tp = &axienet_options[0];
510 
511 	while (tp->opt) {
512 		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
513 		if (options & tp->opt)
514 			reg |= tp->m_or;
515 		axienet_iow(lp, tp->reg, reg);
516 		tp++;
517 	}
518 
519 	lp->options |= options;
520 }
521 
522 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
523 {
524 	u32 counter;
525 
526 	if (lp->reset_in_progress)
527 		return lp->hw_stat_base[stat];
528 
529 	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
530 	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
531 }
532 
533 static void axienet_stats_update(struct axienet_local *lp, bool reset)
534 {
535 	enum temac_stat stat;
536 
537 	write_seqcount_begin(&lp->hw_stats_seqcount);
538 	lp->reset_in_progress = reset;
539 	for (stat = 0; stat < STAT_COUNT; stat++) {
540 		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
541 
542 		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
543 		lp->hw_last_counter[stat] = counter;
544 	}
545 	write_seqcount_end(&lp->hw_stats_seqcount);
546 }
547 
548 static void axienet_refresh_stats(struct work_struct *work)
549 {
550 	struct axienet_local *lp = container_of(work, struct axienet_local,
551 						stats_work.work);
552 
553 	mutex_lock(&lp->stats_lock);
554 	axienet_stats_update(lp, false);
555 	mutex_unlock(&lp->stats_lock);
556 
557 	/* Just less than 2^32 bytes at 2.5 GBit/s */
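	/* 2^32 bytes at 2.5 Gbit/s is roughly 13.7 s, so refreshing every 13 s
	 * keeps the 32-bit hardware counters from wrapping more than once
	 * between samples.
	 */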
558 	schedule_delayed_work(&lp->stats_work, 13 * HZ);
559 }
560 
561 static int __axienet_device_reset(struct axienet_local *lp)
562 {
563 	u32 value;
564 	int ret;
565 
566 	/* Save statistics counters in case they will be reset */
567 	mutex_lock(&lp->stats_lock);
568 	if (lp->features & XAE_FEATURE_STATS)
569 		axienet_stats_update(lp, true);
570 
571 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
572 	 * process of Axi DMA takes a while to complete as all pending
573 	 * commands/transfers will be flushed or completed during this
574 	 * reset process.
575 	 * Note that even though both TX and RX have their own reset register,
576 	 * they both reset the entire DMA core, so only one needs to be used.
577 	 */
578 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
579 	ret = read_poll_timeout(axienet_dma_in32, value,
580 				!(value & XAXIDMA_CR_RESET_MASK),
581 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
582 				XAXIDMA_TX_CR_OFFSET);
583 	if (ret) {
584 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
585 		goto out;
586 	}
587 
588 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
589 	ret = read_poll_timeout(axienet_ior, value,
590 				value & XAE_INT_PHYRSTCMPLT_MASK,
591 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
592 				XAE_IS_OFFSET);
593 	if (ret) {
594 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
595 		goto out;
596 	}
597 
598 	/* Update statistics counters with new values */
599 	if (lp->features & XAE_FEATURE_STATS) {
600 		enum temac_stat stat;
601 
602 		write_seqcount_begin(&lp->hw_stats_seqcount);
603 		lp->reset_in_progress = false;
604 		for (stat = 0; stat < STAT_COUNT; stat++) {
605 			u32 counter =
606 				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
607 
608 			lp->hw_stat_base[stat] +=
609 				lp->hw_last_counter[stat] - counter;
610 			lp->hw_last_counter[stat] = counter;
611 		}
612 		write_seqcount_end(&lp->hw_stats_seqcount);
613 	}
614 
615 out:
616 	mutex_unlock(&lp->stats_lock);
617 	return ret;
618 }
619 
620 /**
621  * axienet_dma_stop - Stop DMA operation
622  * @lp:		Pointer to the axienet_local structure
623  */
624 static void axienet_dma_stop(struct axienet_local *lp)
625 {
626 	int count;
627 	u32 cr, sr;
628 
629 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
630 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
631 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
632 	synchronize_irq(lp->rx_irq);
633 
634 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
635 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
636 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
637 	synchronize_irq(lp->tx_irq);
638 
639 	/* Give DMAs a chance to halt gracefully */
640 	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
641 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
642 		msleep(20);
643 		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
644 	}
645 
646 	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
647 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
648 		msleep(20);
649 		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
650 	}
651 
652 	/* Do a reset to ensure DMA is really stopped */
653 	axienet_lock_mii(lp);
654 	__axienet_device_reset(lp);
655 	axienet_unlock_mii(lp);
656 }
657 
658 /**
659  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
660  * @ndev:	Pointer to the net_device structure
661  *
662  * This function is called to reset and initialize the Axi Ethernet core. This
663  * is typically called during initialization. It does a reset of the Axi DMA
664  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
665  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
666  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
667  * core.
668  * Returns 0 on success or a negative error number otherwise.
669  */
670 static int axienet_device_reset(struct net_device *ndev)
671 {
672 	u32 axienet_status;
673 	struct axienet_local *lp = netdev_priv(ndev);
674 	int ret;
675 
676 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
677 	lp->options |= XAE_OPTION_VLAN;
678 	lp->options &= (~XAE_OPTION_JUMBO);
679 
680 	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
681 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
682 					XAE_TRL_SIZE;
683 
684 		if (lp->max_frm_size <= lp->rxmem)
685 			lp->options |= XAE_OPTION_JUMBO;
686 	}
687 
688 	if (!lp->use_dmaengine) {
689 		ret = __axienet_device_reset(lp);
690 		if (ret)
691 			return ret;
692 
693 		ret = axienet_dma_bd_init(ndev);
694 		if (ret) {
695 			netdev_err(ndev, "%s: descriptor allocation failed\n",
696 				   __func__);
697 			return ret;
698 		}
699 	}
700 
701 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
702 	axienet_status &= ~XAE_RCW1_RX_MASK;
703 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
704 
705 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
706 	if (axienet_status & XAE_INT_RXRJECT_MASK)
707 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
708 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
709 		    XAE_INT_RECV_ERROR_MASK : 0);
710 
711 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
712 
713 	/* Sync default options with HW but leave receiver and
714 	 * transmitter disabled.
715 	 */
716 	axienet_setoptions(ndev, lp->options &
717 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
718 	axienet_set_mac_address(ndev, NULL);
719 	axienet_set_multicast_list(ndev);
720 	axienet_setoptions(ndev, lp->options);
721 
722 	netif_trans_update(ndev);
723 
724 	return 0;
725 }
726 
727 /**
728  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
729  * @lp:		Pointer to the axienet_local structure
730  * @first_bd:	Index of first descriptor to clean up
731  * @nr_bds:	Max number of descriptors to clean up
732  * @force:	Whether to clean descriptors even if not complete
733  * @sizep:	Pointer to a u32 filled with the total sum of all bytes
734  *		in all cleaned-up descriptors. Ignored if NULL.
735  * @budget:	NAPI budget (use 0 when not called from NAPI poll)
736  *
737  * Would either be called after a successful transmit operation, or after
738  * there was an error when setting up the chain.
739  * Returns the number of packets handled.
740  */
741 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
742 				 int nr_bds, bool force, u32 *sizep, int budget)
743 {
744 	struct axidma_bd *cur_p;
745 	unsigned int status;
746 	int i, packets = 0;
747 	dma_addr_t phys;
748 
749 	for (i = 0; i < nr_bds; i++) {
750 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
751 		status = cur_p->status;
752 
753 		/* If force is not specified, clean up only descriptors
754 		 * that have been completed by the MAC.
755 		 */
756 		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
757 			break;
758 
759 		/* Ensure we see complete descriptor update */
760 		dma_rmb();
761 		phys = desc_get_phys_addr(lp, cur_p);
762 		dma_unmap_single(lp->dev, phys,
763 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
764 				 DMA_TO_DEVICE);
765 
766 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
767 			napi_consume_skb(cur_p->skb, budget);
768 			packets++;
769 		}
770 
771 		cur_p->app0 = 0;
772 		cur_p->app1 = 0;
773 		cur_p->app2 = 0;
774 		cur_p->app4 = 0;
775 		cur_p->skb = NULL;
776 		/* ensure our transmit path and device don't prematurely see status cleared */
777 		wmb();
778 		cur_p->cntrl = 0;
779 		cur_p->status = 0;
780 
781 		if (sizep)
782 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
783 	}
784 
785 	if (!force) {
786 		lp->tx_bd_ci += i;
787 		if (lp->tx_bd_ci >= lp->tx_bd_num)
788 			lp->tx_bd_ci %= lp->tx_bd_num;
789 	}
790 
791 	return packets;
792 }
793 
794 /**
795  * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
796  * @lp:		Pointer to the axienet_local structure
797  * @num_frag:	The number of BDs to check for
798  *
799  * Return: 0, on success
800  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
801  *
802  * This function is invoked before BDs are allocated and transmission starts.
803  * This function returns 0 if a BD or group of BDs can be allocated for
804  * transmission. If the BD or any of the BDs are not free the function
805  * returns a busy status.
806  */
807 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
808 					    int num_frag)
809 {
810 	struct axidma_bd *cur_p;
811 
812 	/* Ensure we see all descriptor updates from device or TX polling */
813 	rmb();
814 	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
815 			     lp->tx_bd_num];
816 	if (cur_p->cntrl)
817 		return NETDEV_TX_BUSY;
818 	return 0;
819 }
820 
821 /**
822  * axienet_dma_tx_cb - DMA engine callback for TX channel.
823  * @data:       Pointer to the axienet_local structure.
824  * @result:     error reporting through dmaengine_result.
825  * This function is called by dmaengine driver for TX channel to notify
826  * that the transmit is done.
827  */
828 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
829 {
830 	struct skbuf_dma_descriptor *skbuf_dma;
831 	struct axienet_local *lp = data;
832 	struct netdev_queue *txq;
833 	int len;
834 
835 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
836 	len = skbuf_dma->skb->len;
837 	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
838 	u64_stats_update_begin(&lp->tx_stat_sync);
839 	u64_stats_add(&lp->tx_bytes, len);
840 	u64_stats_add(&lp->tx_packets, 1);
841 	u64_stats_update_end(&lp->tx_stat_sync);
842 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
843 	dev_consume_skb_any(skbuf_dma->skb);
844 	netif_txq_completed_wake(txq, 1, len,
845 				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
846 				 2);
847 }
848 
849 /**
850  * axienet_start_xmit_dmaengine - Starts the transmission.
851  * @skb:        sk_buff pointer that contains data to be Txed.
852  * @ndev:       Pointer to net_device structure.
853  *
854  * Return: NETDEV_TX_OK on success, and also for non-space errors (in which
855  *         case the skb is dropped). NETDEV_TX_BUSY when no free element is
856  *         available in the TX skb ring buffer.
857  *
858  * This function is invoked to initiate transmission. The function sets up
859  * the skb, registers the DMA completion callback and submits the DMA
860  * transaction.
861  * Additionally if checksum offloading is supported,
862  * it populates AXI Stream Control fields with appropriate values.
863  */
864 static netdev_tx_t
865 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
866 {
867 	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
868 	struct axienet_local *lp = netdev_priv(ndev);
869 	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
870 	struct skbuf_dma_descriptor *skbuf_dma;
871 	struct dma_device *dma_dev;
872 	struct netdev_queue *txq;
873 	u32 csum_start_off;
874 	u32 csum_index_off;
875 	int sg_len;
876 	int ret;
877 
878 	dma_dev = lp->tx_chan->device;
879 	sg_len = skb_shinfo(skb)->nr_frags + 1;
880 	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
881 		netif_stop_queue(ndev);
882 		if (net_ratelimit())
883 			netdev_warn(ndev, "TX ring unexpectedly full\n");
884 		return NETDEV_TX_BUSY;
885 	}
886 
887 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
888 	if (!skbuf_dma)
889 		goto xmit_error_drop_skb;
890 
891 	lp->tx_ring_head++;
892 	sg_init_table(skbuf_dma->sgl, sg_len);
893 	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
894 	if (ret < 0)
895 		goto xmit_error_drop_skb;
896 
897 	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
898 	if (!ret)
899 		goto xmit_error_drop_skb;
900 
901 	/* Fill up app fields for checksum */
902 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
903 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
904 			/* Tx Full Checksum Offload Enabled */
905 			app_metadata[0] |= 2;
906 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
907 			csum_start_off = skb_transport_offset(skb);
908 			csum_index_off = csum_start_off + skb->csum_offset;
909 			/* Tx Partial Checksum Offload Enabled */
910 			app_metadata[0] |= 1;
911 			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
912 		}
913 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
914 		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
915 	}
916 
917 	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
918 			sg_len, DMA_MEM_TO_DEV,
919 			DMA_PREP_INTERRUPT, (void *)app_metadata);
920 	if (!dma_tx_desc)
921 		goto xmit_error_unmap_sg;
922 
923 	skbuf_dma->skb = skb;
924 	skbuf_dma->sg_len = sg_len;
925 	dma_tx_desc->callback_param = lp;
926 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
927 	txq = skb_get_tx_queue(lp->ndev, skb);
928 	netdev_tx_sent_queue(txq, skb->len);
929 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
930 			     1, 2);
931 
932 	dmaengine_submit(dma_tx_desc);
933 	dma_async_issue_pending(lp->tx_chan);
934 	return NETDEV_TX_OK;
935 
936 xmit_error_unmap_sg:
937 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
938 xmit_error_drop_skb:
939 	dev_kfree_skb_any(skb);
940 	return NETDEV_TX_OK;
941 }
942 
943 /**
944  * axienet_tx_poll - Invoked once a transmit is completed by the
945  * Axi DMA Tx channel.
946  * @napi:	Pointer to NAPI structure.
947  * @budget:	Max number of TX packets to process.
948  *
949  * Return: Number of TX packets processed.
950  *
951  * This function is invoked from the NAPI processing to notify the completion
952  * of transmit operation. It clears fields in the corresponding Tx BDs and
953  * unmaps the corresponding buffer so that CPU can regain ownership of the
954  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
955  * required.
956  */
957 static int axienet_tx_poll(struct napi_struct *napi, int budget)
958 {
959 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
960 	struct net_device *ndev = lp->ndev;
961 	u32 size = 0;
962 	int packets;
963 
964 	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
965 					&size, budget);
966 
967 	if (packets) {
968 		u64_stats_update_begin(&lp->tx_stat_sync);
969 		u64_stats_add(&lp->tx_packets, packets);
970 		u64_stats_add(&lp->tx_bytes, size);
971 		u64_stats_update_end(&lp->tx_stat_sync);
972 
973 		/* Matches barrier in axienet_start_xmit */
974 		smp_mb();
975 
976 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
977 			netif_wake_queue(ndev);
978 	}
979 
980 	if (packets < budget && napi_complete_done(napi, packets)) {
981 		/* Re-enable TX completion interrupts. This should
982 		 * cause an immediate interrupt if any TX packets are
983 		 * already pending.
984 		 */
985 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
986 	}
987 	return packets;
988 }
989 
990 /**
991  * axienet_start_xmit - Starts the transmission.
992  * @skb:	sk_buff pointer that contains data to be Txed.
993  * @ndev:	Pointer to net_device structure.
994  *
995  * Return: NETDEV_TX_OK, on success
996  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
997  *
998  * This function is invoked from upper layers to initiate transmission. The
999  * function uses the next available free BDs and populates their fields to
1000  * start the transmission. Additionally if checksum offloading is supported,
1001  * it populates AXI Stream Control fields with appropriate values.
1002  */
1003 static netdev_tx_t
1004 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005 {
1006 	u32 ii;
1007 	u32 num_frag;
1008 	u32 csum_start_off;
1009 	u32 csum_index_off;
1010 	skb_frag_t *frag;
1011 	dma_addr_t tail_p, phys;
1012 	u32 orig_tail_ptr, new_tail_ptr;
1013 	struct axienet_local *lp = netdev_priv(ndev);
1014 	struct axidma_bd *cur_p;
1015 
1016 	orig_tail_ptr = lp->tx_bd_tail;
1017 	new_tail_ptr = orig_tail_ptr;
1018 
1019 	num_frag = skb_shinfo(skb)->nr_frags;
1020 	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021 
1022 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023 		/* Should not happen as last start_xmit call should have
1024 		 * checked for sufficient space and queue should only be
1025 		 * woken when sufficient space is available.
1026 		 */
1027 		netif_stop_queue(ndev);
1028 		if (net_ratelimit())
1029 			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030 		return NETDEV_TX_BUSY;
1031 	}
1032 
1033 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035 			/* Tx Full Checksum Offload Enabled */
1036 			cur_p->app0 |= 2;
1037 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038 			csum_start_off = skb_transport_offset(skb);
1039 			csum_index_off = csum_start_off + skb->csum_offset;
1040 			/* Tx Partial Checksum Offload Enabled */
1041 			cur_p->app0 |= 1;
1042 			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043 		}
1044 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046 	}
1047 
1048 	phys = dma_map_single(lp->dev, skb->data,
1049 			      skb_headlen(skb), DMA_TO_DEVICE);
1050 	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051 		if (net_ratelimit())
1052 			netdev_err(ndev, "TX DMA mapping error\n");
1053 		ndev->stats.tx_dropped++;
1054 		dev_kfree_skb_any(skb);
1055 		return NETDEV_TX_OK;
1056 	}
1057 	desc_set_phys_addr(lp, phys, cur_p);
1058 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059 
1060 	for (ii = 0; ii < num_frag; ii++) {
1061 		if (++new_tail_ptr >= lp->tx_bd_num)
1062 			new_tail_ptr = 0;
1063 		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064 		frag = &skb_shinfo(skb)->frags[ii];
1065 		phys = dma_map_single(lp->dev,
1066 				      skb_frag_address(frag),
1067 				      skb_frag_size(frag),
1068 				      DMA_TO_DEVICE);
1069 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070 			if (net_ratelimit())
1071 				netdev_err(ndev, "TX DMA mapping error\n");
1072 			ndev->stats.tx_dropped++;
1073 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074 					      true, NULL, 0);
1075 			dev_kfree_skb_any(skb);
1076 			return NETDEV_TX_OK;
1077 		}
1078 		desc_set_phys_addr(lp, phys, cur_p);
1079 		cur_p->cntrl = skb_frag_size(frag);
1080 	}
1081 
1082 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083 	cur_p->skb = skb;
1084 
1085 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086 	if (++new_tail_ptr >= lp->tx_bd_num)
1087 		new_tail_ptr = 0;
1088 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089 
1090 	/* Start the transfer */
1091 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092 
1093 	/* Stop queue if next transmit may not have space */
1094 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095 		netif_stop_queue(ndev);
1096 
1097 		/* Matches barrier in axienet_tx_poll */
1098 		smp_mb();
1099 
1100 		/* Space might have just been freed - check again */
1101 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102 			netif_wake_queue(ndev);
1103 	}
1104 
1105 	return NETDEV_TX_OK;
1106 }
1107 
1108 /**
1109  * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110  * @data:       Pointer to the skbuf_dma_descriptor structure.
1111  * @result:     error reporting through dmaengine_result.
1112  * This function is called by dmaengine driver for RX channel to notify
1113  * that the packet is received.
1114  */
1115 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116 {
1117 	struct skbuf_dma_descriptor *skbuf_dma;
1118 	size_t meta_len, meta_max_len, rx_len;
1119 	struct axienet_local *lp = data;
1120 	struct sk_buff *skb;
1121 	u32 *app_metadata;
1122 	int i;
1123 
1124 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1125 	skb = skbuf_dma->skb;
1126 	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1127 						       &meta_max_len);
1128 	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1129 			 DMA_FROM_DEVICE);
1130 
1131 	if (IS_ERR(app_metadata)) {
1132 		if (net_ratelimit())
1133 			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
1134 		dev_kfree_skb_any(skb);
1135 		lp->ndev->stats.rx_dropped++;
1136 		goto rx_submit;
1137 	}
1138 
1139 	/* TODO: Derive app word index programmatically */
1140 	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1141 	skb_put(skb, rx_len);
1142 	skb->protocol = eth_type_trans(skb, lp->ndev);
1143 	skb->ip_summed = CHECKSUM_NONE;
1144 
1145 	__netif_rx(skb);
1146 	u64_stats_update_begin(&lp->rx_stat_sync);
1147 	u64_stats_add(&lp->rx_packets, 1);
1148 	u64_stats_add(&lp->rx_bytes, rx_len);
1149 	u64_stats_update_end(&lp->rx_stat_sync);
1150 
1151 rx_submit:
1152 	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
1153 				   RX_BUF_NUM_DEFAULT); i++)
1154 		axienet_rx_submit_desc(lp->ndev);
1155 	dma_async_issue_pending(lp->rx_chan);
1156 }
1157 
1158 /**
1159  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1160  * @napi:	Pointer to NAPI structure.
1161  * @budget:	Max number of RX packets to process.
1162  *
1163  * Return: Number of RX packets processed.
1164  */
1165 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1166 {
1167 	u32 length;
1168 	u32 csumstatus;
1169 	u32 size = 0;
1170 	int packets = 0;
1171 	dma_addr_t tail_p = 0;
1172 	struct axidma_bd *cur_p;
1173 	struct sk_buff *skb, *new_skb;
1174 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1175 
1176 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1177 
1178 	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1179 		dma_addr_t phys;
1180 
1181 		/* Ensure we see complete descriptor update */
1182 		dma_rmb();
1183 
1184 		skb = cur_p->skb;
1185 		cur_p->skb = NULL;
1186 
1187 		/* skb could be NULL if a previous pass already received the
1188 		 * packet for this slot in the ring, but failed to refill it
1189 		 * with a newly allocated buffer. In this case, don't try to
1190 		 * receive it again.
1191 		 */
1192 		if (likely(skb)) {
1193 			length = cur_p->app4 & 0x0000FFFF;
1194 
1195 			phys = desc_get_phys_addr(lp, cur_p);
1196 			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1197 					 DMA_FROM_DEVICE);
1198 
1199 			skb_put(skb, length);
1200 			skb->protocol = eth_type_trans(skb, lp->ndev);
1201 			/*skb_checksum_none_assert(skb);*/
1202 			skb->ip_summed = CHECKSUM_NONE;
1203 
1204 			/* if we're doing Rx csum offload, set it up */
1205 			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1206 				csumstatus = (cur_p->app2 &
1207 					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1208 				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1209 				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1210 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1211 				}
1212 			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1213 				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1214 				skb->ip_summed = CHECKSUM_COMPLETE;
1215 			}
1216 
1217 			napi_gro_receive(napi, skb);
1218 
1219 			size += length;
1220 			packets++;
1221 		}
1222 
1223 		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1224 		if (!new_skb)
1225 			break;
1226 
1227 		phys = dma_map_single(lp->dev, new_skb->data,
1228 				      lp->max_frm_size,
1229 				      DMA_FROM_DEVICE);
1230 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1231 			if (net_ratelimit())
1232 				netdev_err(lp->ndev, "RX DMA mapping error\n");
1233 			dev_kfree_skb(new_skb);
1234 			break;
1235 		}
1236 		desc_set_phys_addr(lp, phys, cur_p);
1237 
1238 		cur_p->cntrl = lp->max_frm_size;
1239 		cur_p->status = 0;
1240 		cur_p->skb = new_skb;
1241 
1242 		/* Only update tail_p to mark this slot as usable after it has
1243 		 * been successfully refilled.
1244 		 */
1245 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1246 
1247 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1248 			lp->rx_bd_ci = 0;
1249 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1250 	}
1251 
1252 	u64_stats_update_begin(&lp->rx_stat_sync);
1253 	u64_stats_add(&lp->rx_packets, packets);
1254 	u64_stats_add(&lp->rx_bytes, size);
1255 	u64_stats_update_end(&lp->rx_stat_sync);
1256 
1257 	if (tail_p)
1258 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1259 
1260 	if (packets < budget && napi_complete_done(napi, packets)) {
1261 		/* Re-enable RX completion interrupts. This should
1262 		 * cause an immediate interrupt if any RX packets are
1263 		 * already pending.
1264 		 */
1265 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1266 	}
1267 	return packets;
1268 }
1269 
1270 /**
1271  * axienet_tx_irq - Tx Done Isr.
1272  * @irq:	irq number
1273  * @_ndev:	net_device pointer
1274  *
1275  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1276  *
1277  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1278  * TX BD processing.
1279  */
1280 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1281 {
1282 	unsigned int status;
1283 	struct net_device *ndev = _ndev;
1284 	struct axienet_local *lp = netdev_priv(ndev);
1285 
1286 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1287 
1288 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1289 		return IRQ_NONE;
1290 
1291 	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1292 
1293 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1294 		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1295 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1296 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1297 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1298 		schedule_work(&lp->dma_err_task);
1299 	} else {
1300 		/* Disable further TX completion interrupts and schedule
1301 		 * NAPI to handle the completions.
1302 		 */
1303 		u32 cr = lp->tx_dma_cr;
1304 
1305 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1306 		if (napi_schedule_prep(&lp->napi_tx)) {
1307 			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1308 			__napi_schedule(&lp->napi_tx);
1309 		}
1310 	}
1311 
1312 	return IRQ_HANDLED;
1313 }
1314 
1315 /**
1316  * axienet_rx_irq - Rx Isr.
1317  * @irq:	irq number
1318  * @_ndev:	net_device pointer
1319  *
1320  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1321  *
1322  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1323  * processing.
1324  */
1325 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1326 {
1327 	unsigned int status;
1328 	struct net_device *ndev = _ndev;
1329 	struct axienet_local *lp = netdev_priv(ndev);
1330 
1331 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1332 
1333 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1334 		return IRQ_NONE;
1335 
1336 	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1337 
1338 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1339 		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1340 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1341 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1342 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1343 		schedule_work(&lp->dma_err_task);
1344 	} else {
1345 		/* Disable further RX completion interrupts and schedule
1346 		 * NAPI receive.
1347 		 */
1348 		u32 cr = lp->rx_dma_cr;
1349 
1350 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1351 		if (napi_schedule_prep(&lp->napi_rx)) {
1352 			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1353 			__napi_schedule(&lp->napi_rx);
1354 		}
1355 	}
1356 
1357 	return IRQ_HANDLED;
1358 }
1359 
1360 /**
1361  * axienet_eth_irq - Ethernet core Isr.
1362  * @irq:	irq number
1363  * @_ndev:	net_device pointer
1364  *
1365  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1366  *
1367  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1368  */
1369 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1370 {
1371 	struct net_device *ndev = _ndev;
1372 	struct axienet_local *lp = netdev_priv(ndev);
1373 	unsigned int pending;
1374 
1375 	pending = axienet_ior(lp, XAE_IP_OFFSET);
1376 	if (!pending)
1377 		return IRQ_NONE;
1378 
1379 	if (pending & XAE_INT_RXFIFOOVR_MASK)
1380 		ndev->stats.rx_missed_errors++;
1381 
1382 	if (pending & XAE_INT_RXRJECT_MASK)
1383 		ndev->stats.rx_dropped++;
1384 
1385 	axienet_iow(lp, XAE_IS_OFFSET, pending);
1386 	return IRQ_HANDLED;
1387 }
1388 
1389 static void axienet_dma_err_handler(struct work_struct *work);
1390 
1391 /**
1392  * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1393  * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
1394  * then add the callback information and submit the descriptor.
1395  *
1396  * @ndev:	net_device pointer
1397  *
1398  */
1399 static void axienet_rx_submit_desc(struct net_device *ndev)
1400 {
1401 	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1402 	struct axienet_local *lp = netdev_priv(ndev);
1403 	struct skbuf_dma_descriptor *skbuf_dma;
1404 	struct sk_buff *skb;
1405 	dma_addr_t addr;
1406 
1407 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1408 	if (!skbuf_dma)
1409 		return;
1410 
1411 	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1412 	if (!skb)
1413 		return;
1414 
1415 	sg_init_table(skbuf_dma->sgl, 1);
1416 	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1417 	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1418 		if (net_ratelimit())
1419 			netdev_err(ndev, "DMA mapping error\n");
1420 		goto rx_submit_err_free_skb;
1421 	}
1422 	sg_dma_address(skbuf_dma->sgl) = addr;
1423 	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1424 	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1425 					      1, DMA_DEV_TO_MEM,
1426 					      DMA_PREP_INTERRUPT);
1427 	if (!dma_rx_desc)
1428 		goto rx_submit_err_unmap_skb;
1429 
1430 	skbuf_dma->skb = skb;
1431 	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1432 	skbuf_dma->desc = dma_rx_desc;
1433 	dma_rx_desc->callback_param = lp;
1434 	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1435 	lp->rx_ring_head++;
1436 	dmaengine_submit(dma_rx_desc);
1437 
1438 	return;
1439 
1440 rx_submit_err_unmap_skb:
1441 	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1442 rx_submit_err_free_skb:
1443 	dev_kfree_skb(skb);
1444 }
1445 
1446 /**
1447  * axienet_init_dmaengine - init the dmaengine code.
1448  * @ndev:       Pointer to net_device structure
1449  *
1450  * Return: 0, on success.
1451  *          non-zero error value on failure
1452  *
1453  * This is the dmaengine initialization code.
1454  */
1455 static int axienet_init_dmaengine(struct net_device *ndev)
1456 {
1457 	struct axienet_local *lp = netdev_priv(ndev);
1458 	struct skbuf_dma_descriptor *skbuf_dma;
1459 	int i, ret;
1460 
1461 	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1462 	if (IS_ERR(lp->tx_chan)) {
1463 		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1464 		return PTR_ERR(lp->tx_chan);
1465 	}
1466 
1467 	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1468 	if (IS_ERR(lp->rx_chan)) {
1469 		ret = PTR_ERR(lp->rx_chan);
1470 		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1471 		goto err_dma_release_tx;
1472 	}
1473 
1474 	lp->tx_ring_tail = 0;
1475 	lp->tx_ring_head = 0;
1476 	lp->rx_ring_tail = 0;
1477 	lp->rx_ring_head = 0;
1478 	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1479 				  GFP_KERNEL);
1480 	if (!lp->tx_skb_ring) {
1481 		ret = -ENOMEM;
1482 		goto err_dma_release_rx;
1483 	}
1484 	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1485 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1486 		if (!skbuf_dma) {
1487 			ret = -ENOMEM;
1488 			goto err_free_tx_skb_ring;
1489 		}
1490 		lp->tx_skb_ring[i] = skbuf_dma;
1491 	}
1492 
1493 	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1494 				  GFP_KERNEL);
1495 	if (!lp->rx_skb_ring) {
1496 		ret = -ENOMEM;
1497 		goto err_free_tx_skb_ring;
1498 	}
1499 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1500 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1501 		if (!skbuf_dma) {
1502 			ret = -ENOMEM;
1503 			goto err_free_rx_skb_ring;
1504 		}
1505 		lp->rx_skb_ring[i] = skbuf_dma;
1506 	}
1507 	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1508 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1509 		axienet_rx_submit_desc(ndev);
1510 	dma_async_issue_pending(lp->rx_chan);
1511 
1512 	return 0;
1513 
1514 err_free_rx_skb_ring:
1515 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1516 		kfree(lp->rx_skb_ring[i]);
1517 	kfree(lp->rx_skb_ring);
1518 err_free_tx_skb_ring:
1519 	for (i = 0; i < TX_BD_NUM_MAX; i++)
1520 		kfree(lp->tx_skb_ring[i]);
1521 	kfree(lp->tx_skb_ring);
1522 err_dma_release_rx:
1523 	dma_release_channel(lp->rx_chan);
1524 err_dma_release_tx:
1525 	dma_release_channel(lp->tx_chan);
1526 	return ret;
1527 }
1528 
1529 /**
1530  * axienet_init_legacy_dma - init the legacy DMA code.
1531  * @ndev:       Pointer to net_device structure
1532  *
1533  * Return: 0, on success.
1534  *          non-zero error value on failure
1535  *
1536  * This is the legacy DMA initialization code. It also registers the
1537  * interrupt service routines and enables the interrupt lines and ISR handling.
1538  *
1539  */
1540 static int axienet_init_legacy_dma(struct net_device *ndev)
1541 {
1542 	int ret;
1543 	struct axienet_local *lp = netdev_priv(ndev);
1544 
1545 	/* Enable worker thread for Axi DMA error handling */
1546 	lp->stopping = false;
1547 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1548 
1549 	napi_enable(&lp->napi_rx);
1550 	napi_enable(&lp->napi_tx);
1551 
1552 	/* Enable interrupts for Axi DMA Tx */
1553 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1554 			  ndev->name, ndev);
1555 	if (ret)
1556 		goto err_tx_irq;
1557 	/* Enable interrupts for Axi DMA Rx */
1558 	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1559 			  ndev->name, ndev);
1560 	if (ret)
1561 		goto err_rx_irq;
1562 	/* Enable interrupts for Axi Ethernet core (if defined) */
1563 	if (lp->eth_irq > 0) {
1564 		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1565 				  ndev->name, ndev);
1566 		if (ret)
1567 			goto err_eth_irq;
1568 	}
1569 
1570 	return 0;
1571 
1572 err_eth_irq:
1573 	free_irq(lp->rx_irq, ndev);
1574 err_rx_irq:
1575 	free_irq(lp->tx_irq, ndev);
1576 err_tx_irq:
1577 	napi_disable(&lp->napi_tx);
1578 	napi_disable(&lp->napi_rx);
1579 	cancel_work_sync(&lp->dma_err_task);
1580 	dev_err(lp->dev, "request_irq() failed\n");
1581 	return ret;
1582 }
1583 
1584 /**
1585  * axienet_open - Driver open routine.
1586  * @ndev:	Pointer to net_device structure
1587  *
1588  * Return: 0, on success.
1589  *	    non-zero error value on failure
1590  *
1591  * This is the driver open routine. It calls phylink_start to start the
1592  * PHY device.
1593  * It also allocates interrupt service routines, enables the interrupt lines
1594  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1595  * descriptors are initialized.
1596  */
1597 static int axienet_open(struct net_device *ndev)
1598 {
1599 	int ret;
1600 	struct axienet_local *lp = netdev_priv(ndev);
1601 
1602 	/* When we do an Axi Ethernet reset, it resets the complete core
1603 	 * including the MDIO. MDIO must be disabled before resetting.
1604 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1605 	 */
1606 	axienet_lock_mii(lp);
1607 	ret = axienet_device_reset(ndev);
1608 	axienet_unlock_mii(lp);
1609 
1610 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1611 	if (ret) {
1612 		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1613 		return ret;
1614 	}
1615 
1616 	phylink_start(lp->phylink);
1617 
1618 	/* Start the statistics refresh work */
1619 	schedule_delayed_work(&lp->stats_work, 0);
1620 
1621 	if (lp->use_dmaengine) {
1622 		/* Enable interrupts for Axi Ethernet core (if defined) */
1623 		if (lp->eth_irq > 0) {
1624 			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1625 					  ndev->name, ndev);
1626 			if (ret)
1627 				goto err_phy;
1628 		}
1629 
1630 		ret = axienet_init_dmaengine(ndev);
1631 		if (ret < 0)
1632 			goto err_free_eth_irq;
1633 	} else {
1634 		ret = axienet_init_legacy_dma(ndev);
1635 		if (ret)
1636 			goto err_phy;
1637 	}
1638 
1639 	return 0;
1640 
1641 err_free_eth_irq:
1642 	if (lp->eth_irq > 0)
1643 		free_irq(lp->eth_irq, ndev);
1644 err_phy:
1645 	cancel_delayed_work_sync(&lp->stats_work);
1646 	phylink_stop(lp->phylink);
1647 	phylink_disconnect_phy(lp->phylink);
1648 	return ret;
1649 }
1650 
1651 /**
1652  * axienet_stop - Driver stop routine.
1653  * @ndev:	Pointer to net_device structure
1654  *
1655  * Return: 0, on success.
1656  *
1657  * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1658  * device. It also removes the interrupt handlers and disables the interrupts.
1659  * The Axi DMA Tx/Rx BDs are released.
1660  */
1661 static int axienet_stop(struct net_device *ndev)
1662 {
1663 	struct axienet_local *lp = netdev_priv(ndev);
1664 	int i;
1665 
1666 	if (!lp->use_dmaengine) {
1667 		WRITE_ONCE(lp->stopping, true);
1668 		flush_work(&lp->dma_err_task);
1669 
1670 		napi_disable(&lp->napi_tx);
1671 		napi_disable(&lp->napi_rx);
1672 	}
1673 
1674 	cancel_delayed_work_sync(&lp->stats_work);
1675 
1676 	phylink_stop(lp->phylink);
1677 	phylink_disconnect_phy(lp->phylink);
1678 
1679 	axienet_setoptions(ndev, lp->options &
1680 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1681 
1682 	if (!lp->use_dmaengine) {
1683 		axienet_dma_stop(lp);
1684 		cancel_work_sync(&lp->dma_err_task);
1685 		free_irq(lp->tx_irq, ndev);
1686 		free_irq(lp->rx_irq, ndev);
1687 		axienet_dma_bd_release(ndev);
1688 	} else {
1689 		dmaengine_terminate_sync(lp->tx_chan);
1690 		dmaengine_synchronize(lp->tx_chan);
1691 		dmaengine_terminate_sync(lp->rx_chan);
1692 		dmaengine_synchronize(lp->rx_chan);
1693 
1694 		for (i = 0; i < TX_BD_NUM_MAX; i++)
1695 			kfree(lp->tx_skb_ring[i]);
1696 		kfree(lp->tx_skb_ring);
1697 		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1698 			kfree(lp->rx_skb_ring[i]);
1699 		kfree(lp->rx_skb_ring);
1700 
1701 		dma_release_channel(lp->rx_chan);
1702 		dma_release_channel(lp->tx_chan);
1703 	}
1704 
1705 	axienet_iow(lp, XAE_IE_OFFSET, 0);
1706 
1707 	if (lp->eth_irq > 0)
1708 		free_irq(lp->eth_irq, ndev);
1709 	return 0;
1710 }
1711 
1712 /**
1713  * axienet_change_mtu - Driver change mtu routine.
1714  * @ndev:	Pointer to net_device structure
1715  * @new_mtu:	New mtu value to be applied
1716  *
1717  * Return: 0 on success, or a negative error code on failure.
1718  *
1719  * This is the change mtu driver routine. It checks if the Axi Ethernet
1720  * hardware supports jumbo frames before changing the mtu. This can be
1721  * called only when the device is not up.
1722  */
1723 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1724 {
1725 	struct axienet_local *lp = netdev_priv(ndev);
1726 
1727 	if (netif_running(ndev))
1728 		return -EBUSY;
1729 
1730 	if ((new_mtu + VLAN_ETH_HLEN +
1731 		XAE_TRL_SIZE) > lp->rxmem)
1732 		return -EINVAL;
1733 
1734 	WRITE_ONCE(ndev->mtu, new_mtu);
1735 
1736 	return 0;
1737 }
1738 
1739 #ifdef CONFIG_NET_POLL_CONTROLLER
1740 /**
1741  * axienet_poll_controller - Axi Ethernet poll mechanism.
1742  * @ndev:	Pointer to net_device structure
1743  *
1744  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1745  * to polling the ISRs and are enabled back after the polling is done.
1746  */
1747 static void axienet_poll_controller(struct net_device *ndev)
1748 {
1749 	struct axienet_local *lp = netdev_priv(ndev);
1750 
1751 	disable_irq(lp->tx_irq);
1752 	disable_irq(lp->rx_irq);
1753 	axienet_rx_irq(lp->tx_irq, ndev);
1754 	axienet_tx_irq(lp->rx_irq, ndev);
1755 	enable_irq(lp->tx_irq);
1756 	enable_irq(lp->rx_irq);
1757 }
1758 #endif
1759 
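/* MII ioctl handler: forward SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to phylink.
 * The ioctls are only serviced while the interface is running.
 */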
1760 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1761 {
1762 	struct axienet_local *lp = netdev_priv(dev);
1763 
1764 	if (!netif_running(dev))
1765 		return -EINVAL;
1766 
1767 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1768 }
1769 
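/* Fill in netdev stats: the software Rx/Tx packet and byte counters are read
 * under their u64_stats sync points; the error counters come from the MAC's
 * hardware statistics block when the core was built with it.
 */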
1770 static void
1771 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1772 {
1773 	struct axienet_local *lp = netdev_priv(dev);
1774 	unsigned int start;
1775 
1776 	netdev_stats_to_stats64(stats, &dev->stats);
1777 
1778 	do {
1779 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1780 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1781 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1782 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1783 
1784 	do {
1785 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1786 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1787 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1788 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1789 
1790 	if (!(lp->features & XAE_FEATURE_STATS))
1791 		return;
1792 
1793 	do {
1794 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1795 		stats->rx_length_errors =
1796 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1797 		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1798 		stats->rx_frame_errors =
1799 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1800 		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1801 				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1802 				   stats->rx_length_errors +
1803 				   stats->rx_crc_errors +
1804 				   stats->rx_frame_errors;
1805 		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1806 
1807 		stats->tx_aborted_errors =
1808 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1809 		stats->tx_fifo_errors =
1810 			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1811 		stats->tx_window_errors =
1812 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1813 		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1814 				   stats->tx_aborted_errors +
1815 				   stats->tx_fifo_errors +
1816 				   stats->tx_window_errors;
1817 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1818 }
1819 
1820 static const struct net_device_ops axienet_netdev_ops = {
1821 	.ndo_open = axienet_open,
1822 	.ndo_stop = axienet_stop,
1823 	.ndo_start_xmit = axienet_start_xmit,
1824 	.ndo_get_stats64 = axienet_get_stats64,
1825 	.ndo_change_mtu	= axienet_change_mtu,
1826 	.ndo_set_mac_address = netdev_set_mac_address,
1827 	.ndo_validate_addr = eth_validate_addr,
1828 	.ndo_eth_ioctl = axienet_ioctl,
1829 	.ndo_set_rx_mode = axienet_set_multicast_list,
1830 #ifdef CONFIG_NET_POLL_CONTROLLER
1831 	.ndo_poll_controller = axienet_poll_controller,
1832 #endif
1833 };
1834 
1835 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1836 	.ndo_open = axienet_open,
1837 	.ndo_stop = axienet_stop,
1838 	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1839 	.ndo_get_stats64 = axienet_get_stats64,
1840 	.ndo_change_mtu	= axienet_change_mtu,
1841 	.ndo_set_mac_address = netdev_set_mac_address,
1842 	.ndo_validate_addr = eth_validate_addr,
1843 	.ndo_eth_ioctl = axienet_ioctl,
1844 	.ndo_set_rx_mode = axienet_set_multicast_list,
1845 };
1846 
1847 /**
1848  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1849  * @ndev:	Pointer to net_device structure
1850  * @ed:		Pointer to ethtool_drvinfo structure
1851  *
1852  * This implements ethtool command for getting the driver information.
1853  * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
1854  */
1855 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1856 					 struct ethtool_drvinfo *ed)
1857 {
1858 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1859 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1860 }
1861 
1862 /**
1863  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1864  *				   AxiEthernet core.
1865  * @ndev:	Pointer to net_device structure
1866  *
1867  * This implements ethtool command for getting the total register length
1868  * information.
1869  *
1870  * Return: the total regs length
1871  */
1872 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1873 {
1874 	return sizeof(u32) * AXIENET_REGS_N;
1875 }
1876 
1877 /**
1878  * axienet_ethtools_get_regs - Dump the contents of all registers present
1879  *			       in AxiEthernet core.
1880  * @ndev:	Pointer to net_device structure
1881  * @regs:	Pointer to ethtool_regs structure
1882  * @ret:	Void pointer used to return the contents of the registers.
1883  *
1884  * This implements ethtool command for getting the Axi Ethernet register dump.
1885  * Issue "ethtool -d ethX" to execute this function.
1886  */
1887 static void axienet_ethtools_get_regs(struct net_device *ndev,
1888 				      struct ethtool_regs *regs, void *ret)
1889 {
1890 	u32 *data = (u32 *)ret;
1891 	size_t len = sizeof(u32) * AXIENET_REGS_N;
1892 	struct axienet_local *lp = netdev_priv(ndev);
1893 
1894 	regs->version = 0;
1895 	regs->len = len;
1896 
1897 	memset(data, 0, len);
1898 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1899 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1900 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1901 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1902 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1903 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1904 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1905 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1906 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1907 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1908 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1909 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1910 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1911 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1912 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1913 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1914 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1915 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1916 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1917 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1918 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1919 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1920 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1921 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1922 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1923 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1924 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1925 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1926 	if (!lp->use_dmaengine) {
1927 		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1928 		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1929 		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1930 		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1931 		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1932 		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1933 		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1934 		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1935 	}
1936 }
1937 
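/* Report the supported descriptor ring limits and the currently configured
 * Tx/Rx buffer descriptor counts.
 */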
1938 static void
1939 axienet_ethtools_get_ringparam(struct net_device *ndev,
1940 			       struct ethtool_ringparam *ering,
1941 			       struct kernel_ethtool_ringparam *kernel_ering,
1942 			       struct netlink_ext_ack *extack)
1943 {
1944 	struct axienet_local *lp = netdev_priv(ndev);
1945 
1946 	ering->rx_max_pending = RX_BD_NUM_MAX;
1947 	ering->rx_mini_max_pending = 0;
1948 	ering->rx_jumbo_max_pending = 0;
1949 	ering->tx_max_pending = TX_BD_NUM_MAX;
1950 	ering->rx_pending = lp->rx_bd_num;
1951 	ering->rx_mini_pending = 0;
1952 	ering->rx_jumbo_pending = 0;
1953 	ering->tx_pending = lp->tx_bd_num;
1954 }
1955 
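/* Validate and store new Tx/Rx ring sizes. Only allowed while the interface
 * is down; the new sizes take effect when the rings are reallocated on open.
 */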
1956 static int
1957 axienet_ethtools_set_ringparam(struct net_device *ndev,
1958 			       struct ethtool_ringparam *ering,
1959 			       struct kernel_ethtool_ringparam *kernel_ering,
1960 			       struct netlink_ext_ack *extack)
1961 {
1962 	struct axienet_local *lp = netdev_priv(ndev);
1963 
1964 	if (ering->rx_pending > RX_BD_NUM_MAX ||
1965 	    ering->rx_mini_pending ||
1966 	    ering->rx_jumbo_pending ||
1967 	    ering->tx_pending < TX_BD_NUM_MIN ||
1968 	    ering->tx_pending > TX_BD_NUM_MAX)
1969 		return -EINVAL;
1970 
1971 	if (netif_running(ndev))
1972 		return -EBUSY;
1973 
1974 	lp->rx_bd_num = ering->rx_pending;
1975 	lp->tx_bd_num = ering->tx_pending;
1976 	return 0;
1977 }
1978 
1979 /**
1980  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1981  *				     Tx and Rx paths.
1982  * @ndev:	Pointer to net_device structure
1983  * @epauseparm:	Pointer to ethtool_pauseparam structure.
1984  *
1985  * This implements ethtool command for getting axi ethernet pause frame
1986  * setting. Issue "ethtool -a ethX" to execute this function.
1987  */
1988 static void
1989 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1990 				struct ethtool_pauseparam *epauseparm)
1991 {
1992 	struct axienet_local *lp = netdev_priv(ndev);
1993 
1994 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1995 }
1996 
1997 /**
1998  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1999  *				     settings.
2000  * @ndev:	Pointer to net_device structure
2001  * @epauseparm:	Pointer to ethtool_pauseparam structure
2002  *
2003  * This implements ethtool command for enabling flow control on Rx and Tx
2004  * paths. Issue "ethtool -A ethX tx on|off" at the Linux prompt to execute
2005  * this function.
2006  *
2007  * Return: 0 on success, or a negative error code from phylink on failure.
2008  */
2009 static int
2010 axienet_ethtools_set_pauseparam(struct net_device *ndev,
2011 				struct ethtool_pauseparam *epauseparm)
2012 {
2013 	struct axienet_local *lp = netdev_priv(ndev);
2014 
2015 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2016 }
2017 
2018 /**
2019  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2020  * @ndev:	Pointer to net_device structure
2021  * @ecoalesce:	Pointer to ethtool_coalesce structure
2022  * @kernel_coal: ethtool CQE mode setting structure
2023  * @extack:	extack for reporting error messages
2024  *
2025  * This implements ethtool command for getting the DMA interrupt coalescing
2026  * count on Tx and Rx paths. Issue "ethtool -c ethX" at the Linux prompt to
2027  * execute this function.
2028  *
2029  * Return: 0 always
2030  */
2031 static int
2032 axienet_ethtools_get_coalesce(struct net_device *ndev,
2033 			      struct ethtool_coalesce *ecoalesce,
2034 			      struct kernel_ethtool_coalesce *kernel_coal,
2035 			      struct netlink_ext_ack *extack)
2036 {
2037 	struct axienet_local *lp = netdev_priv(ndev);
2038 
2039 	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2040 	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2041 	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2042 	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2043 	return 0;
2044 }
2045 
2046 /**
2047  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2048  * @ndev:	Pointer to net_device structure
2049  * @ecoalesce:	Pointer to ethtool_coalesce structure
2050  * @kernel_coal: ethtool CQE mode setting structure
2051  * @extack:	extack for reporting error messages
2052  *
2053  * This implements ethtool command for setting the DMA interrupt coalescing
2054  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" at the Linux
2055  * prompt to execute this function.
2056  *
2057  * Return: 0 on success, non-zero error value on failure.
2058  */
2059 static int
2060 axienet_ethtools_set_coalesce(struct net_device *ndev,
2061 			      struct ethtool_coalesce *ecoalesce,
2062 			      struct kernel_ethtool_coalesce *kernel_coal,
2063 			      struct netlink_ext_ack *extack)
2064 {
2065 	struct axienet_local *lp = netdev_priv(ndev);
2066 
2067 	if (netif_running(ndev)) {
2068 		NL_SET_ERR_MSG(extack,
2069 			       "Please stop netif before applying configuration");
2070 		return -EBUSY;
2071 	}
2072 
2073 	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2074 	    ecoalesce->tx_max_coalesced_frames > 255) {
2075 		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2076 		return -EINVAL;
2077 	}
2078 
2079 	if (ecoalesce->rx_max_coalesced_frames)
2080 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2081 	if (ecoalesce->rx_coalesce_usecs)
2082 		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2083 	if (ecoalesce->tx_max_coalesced_frames)
2084 		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2085 	if (ecoalesce->tx_coalesce_usecs)
2086 		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2087 
2088 	return 0;
2089 }
2090 
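/* Link settings and autonegotiation restart are delegated to phylink. */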
2091 static int
2092 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2093 				    struct ethtool_link_ksettings *cmd)
2094 {
2095 	struct axienet_local *lp = netdev_priv(ndev);
2096 
2097 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2098 }
2099 
2100 static int
2101 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2102 				    const struct ethtool_link_ksettings *cmd)
2103 {
2104 	struct axienet_local *lp = netdev_priv(ndev);
2105 
2106 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2107 }
2108 
2109 static int axienet_ethtools_nway_reset(struct net_device *dev)
2110 {
2111 	struct axienet_local *lp = netdev_priv(dev);
2112 
2113 	return phylink_ethtool_nway_reset(lp->phylink);
2114 }
2115 
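/* Read the extra MAC hardware counters for "ethtool -S". The seqcount loop
 * guards against the periodic statistics refresh updating the 64-bit
 * accumulators while we read them.
 */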
2116 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2117 					       struct ethtool_stats *stats,
2118 					       u64 *data)
2119 {
2120 	struct axienet_local *lp = netdev_priv(dev);
2121 	unsigned int start;
2122 
2123 	do {
2124 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2125 		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2126 		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2127 		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2128 		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2129 		data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2130 		data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2131 		data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2132 		data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2133 		data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2134 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2135 }
2136 
2137 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2138 	"Received bytes",
2139 	"Transmitted bytes",
2140 	"RX Good VLAN Tagged Frames",
2141 	"TX Good VLAN Tagged Frames",
2142 	"TX Good PFC Frames",
2143 	"RX Good PFC Frames",
2144 	"User Defined Counter 0",
2145 	"User Defined Counter 1",
2146 	"User Defined Counter 2",
2147 };
2148 
2149 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2150 {
2151 	switch (stringset) {
2152 	case ETH_SS_STATS:
2153 		memcpy(data, axienet_ethtool_stats_strings,
2154 		       sizeof(axienet_ethtool_stats_strings));
2155 		break;
2156 	}
2157 }
2158 
2159 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2160 {
2161 	struct axienet_local *lp = netdev_priv(dev);
2162 
2163 	switch (sset) {
2164 	case ETH_SS_STATS:
2165 		if (lp->features & XAE_FEATURE_STATS)
2166 			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2167 		fallthrough;
2168 	default:
2169 		return -EOPNOTSUPP;
2170 	}
2171 }
2172 
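/* The standard ethtool pause/MAC/control/RMON statistics below are only
 * available when the core was synthesized with the statistics counters
 * (XAE_FEATURE_STATS).
 */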
2173 static void
2174 axienet_ethtools_get_pause_stats(struct net_device *dev,
2175 				 struct ethtool_pause_stats *pause_stats)
2176 {
2177 	struct axienet_local *lp = netdev_priv(dev);
2178 	unsigned int start;
2179 
2180 	if (!(lp->features & XAE_FEATURE_STATS))
2181 		return;
2182 
2183 	do {
2184 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2185 		pause_stats->tx_pause_frames =
2186 			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2187 		pause_stats->rx_pause_frames =
2188 			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2189 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2190 }
2191 
2192 static void
2193 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2194 				  struct ethtool_eth_mac_stats *mac_stats)
2195 {
2196 	struct axienet_local *lp = netdev_priv(dev);
2197 	unsigned int start;
2198 
2199 	if (!(lp->features & XAE_FEATURE_STATS))
2200 		return;
2201 
2202 	do {
2203 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2204 		mac_stats->FramesTransmittedOK =
2205 			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2206 		mac_stats->SingleCollisionFrames =
2207 			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2208 		mac_stats->MultipleCollisionFrames =
2209 			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2210 		mac_stats->FramesReceivedOK =
2211 			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2212 		mac_stats->FrameCheckSequenceErrors =
2213 			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2214 		mac_stats->AlignmentErrors =
2215 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2216 		mac_stats->FramesWithDeferredXmissions =
2217 			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2218 		mac_stats->LateCollisions =
2219 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2220 		mac_stats->FramesAbortedDueToXSColls =
2221 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2222 		mac_stats->MulticastFramesXmittedOK =
2223 			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2224 		mac_stats->BroadcastFramesXmittedOK =
2225 			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2226 		mac_stats->FramesWithExcessiveDeferral =
2227 			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2228 		mac_stats->MulticastFramesReceivedOK =
2229 			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2230 		mac_stats->BroadcastFramesReceivedOK =
2231 			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2232 		mac_stats->InRangeLengthErrors =
2233 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2234 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2235 }
2236 
2237 static void
2238 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2239 				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2240 {
2241 	struct axienet_local *lp = netdev_priv(dev);
2242 	unsigned int start;
2243 
2244 	if (!(lp->features & XAE_FEATURE_STATS))
2245 		return;
2246 
2247 	do {
2248 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2249 		ctrl_stats->MACControlFramesTransmitted =
2250 			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2251 		ctrl_stats->MACControlFramesReceived =
2252 			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2253 		ctrl_stats->UnsupportedOpcodesReceived =
2254 			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2255 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2256 }
2257 
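/* Frame-size buckets corresponding to the MAC's Rx/Tx frame-size histogram
 * counters reported via get_rmon_stats.
 */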
2258 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2259 	{   64,    64 },
2260 	{   65,   127 },
2261 	{  128,   255 },
2262 	{  256,   511 },
2263 	{  512,  1023 },
2264 	{ 1024,  1518 },
2265 	{ 1519, 16384 },
2266 	{ },
2267 };
2268 
2269 static void
2270 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2271 			       struct ethtool_rmon_stats *rmon_stats,
2272 			       const struct ethtool_rmon_hist_range **ranges)
2273 {
2274 	struct axienet_local *lp = netdev_priv(dev);
2275 	unsigned int start;
2276 
2277 	if (!(lp->features & XAE_FEATURE_STATS))
2278 		return;
2279 
2280 	do {
2281 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2282 		rmon_stats->undersize_pkts =
2283 			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2284 		rmon_stats->oversize_pkts =
2285 			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2286 		rmon_stats->fragments =
2287 			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2288 
2289 		rmon_stats->hist[0] =
2290 			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2291 		rmon_stats->hist[1] =
2292 			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2293 		rmon_stats->hist[2] =
2294 			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2295 		rmon_stats->hist[3] =
2296 			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2297 		rmon_stats->hist[4] =
2298 			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2299 		rmon_stats->hist[5] =
2300 			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2301 		rmon_stats->hist[6] =
2302 			rmon_stats->oversize_pkts;
2303 
2304 		rmon_stats->hist_tx[0] =
2305 			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2306 		rmon_stats->hist_tx[1] =
2307 			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2308 		rmon_stats->hist_tx[2] =
2309 			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2310 		rmon_stats->hist_tx[3] =
2311 			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2312 		rmon_stats->hist_tx[4] =
2313 			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2314 		rmon_stats->hist_tx[5] =
2315 			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2316 		rmon_stats->hist_tx[6] =
2317 			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2318 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2319 
2320 	*ranges = axienet_rmon_ranges;
2321 }
2322 
2323 static const struct ethtool_ops axienet_ethtool_ops = {
2324 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2325 				     ETHTOOL_COALESCE_USECS,
2326 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2327 	.get_regs_len   = axienet_ethtools_get_regs_len,
2328 	.get_regs       = axienet_ethtools_get_regs,
2329 	.get_link       = ethtool_op_get_link,
2330 	.get_ringparam	= axienet_ethtools_get_ringparam,
2331 	.set_ringparam	= axienet_ethtools_set_ringparam,
2332 	.get_pauseparam = axienet_ethtools_get_pauseparam,
2333 	.set_pauseparam = axienet_ethtools_set_pauseparam,
2334 	.get_coalesce   = axienet_ethtools_get_coalesce,
2335 	.set_coalesce   = axienet_ethtools_set_coalesce,
2336 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2337 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2338 	.nway_reset	= axienet_ethtools_nway_reset,
2339 	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2340 	.get_strings    = axienet_ethtools_get_strings,
2341 	.get_sset_count = axienet_ethtools_get_sset_count,
2342 	.get_pause_stats = axienet_ethtools_get_pause_stats,
2343 	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2344 	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2345 	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2346 };
2347 
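/* phylink PCS callbacks: the internal PCS/PMA used for SGMII/1000BASE-X is a
 * clause 22 device accessed through the pcs_phy mdio_device.
 */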
2348 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2349 {
2350 	return container_of(pcs, struct axienet_local, pcs);
2351 }
2352 
2353 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2354 				  struct phylink_link_state *state)
2355 {
2356 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2357 
2358 	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2359 }
2360 
2361 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2362 {
2363 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2364 
2365 	phylink_mii_c22_pcs_an_restart(pcs_phy);
2366 }
2367 
2368 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2369 			      phy_interface_t interface,
2370 			      const unsigned long *advertising,
2371 			      bool permit_pause_to_mac)
2372 {
2373 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2374 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2375 	struct axienet_local *lp = netdev_priv(ndev);
2376 	int ret;
2377 
2378 	if (lp->switch_x_sgmii) {
2379 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2380 				    interface == PHY_INTERFACE_MODE_SGMII ?
2381 					XLNX_MII_STD_SELECT_SGMII : 0);
2382 		if (ret < 0) {
2383 			netdev_warn(ndev,
2384 				    "Failed to switch PHY interface: %d\n",
2385 				    ret);
2386 			return ret;
2387 		}
2388 	}
2389 
2390 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2391 					 neg_mode);
2392 	if (ret < 0)
2393 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2394 
2395 	return ret;
2396 }
2397 
2398 static const struct phylink_pcs_ops axienet_pcs_ops = {
2399 	.pcs_get_state = axienet_pcs_get_state,
2400 	.pcs_config = axienet_pcs_config,
2401 	.pcs_an_restart = axienet_pcs_an_restart,
2402 };
2403 
2404 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2405 						  phy_interface_t interface)
2406 {
2407 	struct net_device *ndev = to_net_dev(config->dev);
2408 	struct axienet_local *lp = netdev_priv(ndev);
2409 
2410 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2411 	    interface == PHY_INTERFACE_MODE_SGMII)
2412 		return &lp->pcs;
2413 
2414 	return NULL;
2415 }
2416 
2417 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2418 			       const struct phylink_link_state *state)
2419 {
2420 	/* nothing meaningful to do */
2421 }
2422 
2423 static void axienet_mac_link_down(struct phylink_config *config,
2424 				  unsigned int mode,
2425 				  phy_interface_t interface)
2426 {
2427 	/* nothing meaningful to do */
2428 }
2429 
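/* Once the link is up, program the resolved speed into the EMMC register and
 * the negotiated pause settings into the FCC register.
 */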
2430 static void axienet_mac_link_up(struct phylink_config *config,
2431 				struct phy_device *phy,
2432 				unsigned int mode, phy_interface_t interface,
2433 				int speed, int duplex,
2434 				bool tx_pause, bool rx_pause)
2435 {
2436 	struct net_device *ndev = to_net_dev(config->dev);
2437 	struct axienet_local *lp = netdev_priv(ndev);
2438 	u32 emmc_reg, fcc_reg;
2439 
2440 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2441 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2442 
2443 	switch (speed) {
2444 	case SPEED_1000:
2445 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2446 		break;
2447 	case SPEED_100:
2448 		emmc_reg |= XAE_EMMC_LINKSPD_100;
2449 		break;
2450 	case SPEED_10:
2451 		emmc_reg |= XAE_EMMC_LINKSPD_10;
2452 		break;
2453 	default:
2454 		dev_err(&ndev->dev,
2455 			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
2456 		break;
2457 	}
2458 
2459 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2460 
2461 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2462 	if (tx_pause)
2463 		fcc_reg |= XAE_FCC_FCTX_MASK;
2464 	else
2465 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2466 	if (rx_pause)
2467 		fcc_reg |= XAE_FCC_FCRX_MASK;
2468 	else
2469 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2470 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2471 }
2472 
2473 static const struct phylink_mac_ops axienet_phylink_ops = {
2474 	.mac_select_pcs = axienet_mac_select_pcs,
2475 	.mac_config = axienet_mac_config,
2476 	.mac_link_down = axienet_mac_link_down,
2477 	.mac_link_up = axienet_mac_link_up,
2478 };
2479 
2480 /**
2481  * axienet_dma_err_handler - Work queue task for Axi DMA Error
2482  * @work:	pointer to work_struct
2483  *
2484  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2485  * Tx/Rx BDs.
2486  */
2487 static void axienet_dma_err_handler(struct work_struct *work)
2488 {
2489 	u32 i;
2490 	u32 axienet_status;
2491 	struct axidma_bd *cur_p;
2492 	struct axienet_local *lp = container_of(work, struct axienet_local,
2493 						dma_err_task);
2494 	struct net_device *ndev = lp->ndev;
2495 
2496 	/* Don't bother if we are going to stop anyway */
2497 	if (READ_ONCE(lp->stopping))
2498 		return;
2499 
2500 	napi_disable(&lp->napi_tx);
2501 	napi_disable(&lp->napi_rx);
2502 
2503 	axienet_setoptions(ndev, lp->options &
2504 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2505 
2506 	axienet_dma_stop(lp);
2507 
2508 	for (i = 0; i < lp->tx_bd_num; i++) {
2509 		cur_p = &lp->tx_bd_v[i];
2510 		if (cur_p->cntrl) {
2511 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2512 
2513 			dma_unmap_single(lp->dev, addr,
2514 					 (cur_p->cntrl &
2515 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2516 					 DMA_TO_DEVICE);
2517 		}
2518 		if (cur_p->skb)
2519 			dev_kfree_skb_irq(cur_p->skb);
2520 		cur_p->phys = 0;
2521 		cur_p->phys_msb = 0;
2522 		cur_p->cntrl = 0;
2523 		cur_p->status = 0;
2524 		cur_p->app0 = 0;
2525 		cur_p->app1 = 0;
2526 		cur_p->app2 = 0;
2527 		cur_p->app3 = 0;
2528 		cur_p->app4 = 0;
2529 		cur_p->skb = NULL;
2530 	}
2531 
2532 	for (i = 0; i < lp->rx_bd_num; i++) {
2533 		cur_p = &lp->rx_bd_v[i];
2534 		cur_p->status = 0;
2535 		cur_p->app0 = 0;
2536 		cur_p->app1 = 0;
2537 		cur_p->app2 = 0;
2538 		cur_p->app3 = 0;
2539 		cur_p->app4 = 0;
2540 	}
2541 
2542 	lp->tx_bd_ci = 0;
2543 	lp->tx_bd_tail = 0;
2544 	lp->rx_bd_ci = 0;
2545 
2546 	axienet_dma_start(lp);
2547 
2548 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2549 	axienet_status &= ~XAE_RCW1_RX_MASK;
2550 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2551 
2552 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2553 	if (axienet_status & XAE_INT_RXRJECT_MASK)
2554 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2555 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2556 		    XAE_INT_RECV_ERROR_MASK : 0);
2557 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2558 
2559 	/* Sync default options with HW but leave receiver and
2560 	 * transmitter disabled.
2561 	 */
2562 	axienet_setoptions(ndev, lp->options &
2563 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2564 	axienet_set_mac_address(ndev, NULL);
2565 	axienet_set_multicast_list(ndev);
2566 	napi_enable(&lp->napi_rx);
2567 	napi_enable(&lp->napi_tx);
2568 	axienet_setoptions(ndev, lp->options);
2569 }
2570 
2571 /**
2572  * axienet_probe - Axi Ethernet probe function.
2573  * @pdev:	Pointer to platform device structure.
2574  *
2575  * Return: 0, on success
2576  *	    Non-zero error value on failure.
2577  *
2578  * This is the probe routine for Axi Ethernet driver. This is called before
2579  * any other driver routines are invoked. It allocates and sets up the Ethernet
2580  * device. Parses through device tree and populates fields of
2581  * axienet_local. It registers the Ethernet device.
2582  */
2583 static int axienet_probe(struct platform_device *pdev)
2584 {
2585 	int ret;
2586 	struct device_node *np;
2587 	struct axienet_local *lp;
2588 	struct net_device *ndev;
2589 	struct resource *ethres;
2590 	u8 mac_addr[ETH_ALEN];
2591 	int addr_width = 32;
2592 	u32 value;
2593 
2594 	ndev = alloc_etherdev(sizeof(*lp));
2595 	if (!ndev)
2596 		return -ENOMEM;
2597 
2598 	platform_set_drvdata(pdev, ndev);
2599 
2600 	SET_NETDEV_DEV(ndev, &pdev->dev);
2601 	ndev->features = NETIF_F_SG;
2602 	ndev->ethtool_ops = &axienet_ethtool_ops;
2603 
2604 	/* MTU range: 64 - 9000 */
2605 	ndev->min_mtu = 64;
2606 	ndev->max_mtu = XAE_JUMBO_MTU;
2607 
2608 	lp = netdev_priv(ndev);
2609 	lp->ndev = ndev;
2610 	lp->dev = &pdev->dev;
2611 	lp->options = XAE_OPTION_DEFAULTS;
2612 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2613 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2614 
2615 	u64_stats_init(&lp->rx_stat_sync);
2616 	u64_stats_init(&lp->tx_stat_sync);
2617 
2618 	mutex_init(&lp->stats_lock);
2619 	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2620 	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2621 
2622 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2623 	if (!lp->axi_clk) {
2624 		/* For backward compatibility, if named AXI clock is not present,
2625 		 * treat the first clock specified as the AXI clock.
2626 		 */
2627 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2628 	}
2629 	if (IS_ERR(lp->axi_clk)) {
2630 		ret = PTR_ERR(lp->axi_clk);
2631 		goto free_netdev;
2632 	}
2633 	ret = clk_prepare_enable(lp->axi_clk);
2634 	if (ret) {
2635 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2636 		goto free_netdev;
2637 	}
2638 
2639 	lp->misc_clks[0].id = "axis_clk";
2640 	lp->misc_clks[1].id = "ref_clk";
2641 	lp->misc_clks[2].id = "mgt_clk";
2642 
2643 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2644 	if (ret)
2645 		goto cleanup_clk;
2646 
2647 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2648 	if (ret)
2649 		goto cleanup_clk;
2650 
2651 	/* Map device registers */
2652 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2653 	if (IS_ERR(lp->regs)) {
2654 		ret = PTR_ERR(lp->regs);
2655 		goto cleanup_clk;
2656 	}
2657 	lp->regs_start = ethres->start;
2658 
2659 	/* Setup checksum offload, but default to off if not specified */
2660 	lp->features = 0;
2661 
2662 	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2663 		lp->features |= XAE_FEATURE_STATS;
2664 
2665 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2666 	if (!ret) {
2667 		switch (value) {
2668 		case 1:
2669 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2670 			/* Can checksum any contiguous range */
2671 			ndev->features |= NETIF_F_HW_CSUM;
2672 			break;
2673 		case 2:
2674 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2675 			/* Can checksum TCP/UDP over IPv4. */
2676 			ndev->features |= NETIF_F_IP_CSUM;
2677 			break;
2678 		}
2679 	}
2680 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2681 	if (!ret) {
2682 		switch (value) {
2683 		case 1:
2684 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2685 			ndev->features |= NETIF_F_RXCSUM;
2686 			break;
2687 		case 2:
2688 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2689 			ndev->features |= NETIF_F_RXCSUM;
2690 			break;
2691 		}
2692 	}
2693 	/* To support jumbo frames, the Axi Ethernet hardware must be built
2694 	 * with a sufficiently large Rx/Tx memory; only then can the jumbo
2695 	 * option be enabled and jumbo frames be handled.
2696 	 * Read the Rx memory size configured into the hardware from the
2697 	 * device tree so the MTU limit can be enforced accordingly.
2698 	 */
2699 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2700 
2701 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2702 						   "xlnx,switch-x-sgmii");
2703 
2704 	/* Start with the proprietary, and broken phy_type */
2705 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2706 	if (!ret) {
2707 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2708 		switch (value) {
2709 		case XAE_PHY_TYPE_MII:
2710 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2711 			break;
2712 		case XAE_PHY_TYPE_GMII:
2713 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2714 			break;
2715 		case XAE_PHY_TYPE_RGMII_2_0:
2716 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2717 			break;
2718 		case XAE_PHY_TYPE_SGMII:
2719 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2720 			break;
2721 		case XAE_PHY_TYPE_1000BASE_X:
2722 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2723 			break;
2724 		default:
2725 			ret = -EINVAL;
2726 			goto cleanup_clk;
2727 		}
2728 	} else {
2729 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2730 		if (ret)
2731 			goto cleanup_clk;
2732 	}
2733 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2734 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2735 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2736 		ret = -EINVAL;
2737 		goto cleanup_clk;
2738 	}
2739 
2740 	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2741 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2742 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2743 
2744 		if (np) {
2745 			struct resource dmares;
2746 
2747 			ret = of_address_to_resource(np, 0, &dmares);
2748 			if (ret) {
2749 				dev_err(&pdev->dev,
2750 					"unable to get DMA resource\n");
2751 				of_node_put(np);
2752 				goto cleanup_clk;
2753 			}
2754 			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2755 							     &dmares);
2756 			lp->rx_irq = irq_of_parse_and_map(np, 1);
2757 			lp->tx_irq = irq_of_parse_and_map(np, 0);
2758 			of_node_put(np);
2759 			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2760 		} else {
2761 			/* Check for these resources directly on the Ethernet node. */
2762 			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2763 			lp->rx_irq = platform_get_irq(pdev, 1);
2764 			lp->tx_irq = platform_get_irq(pdev, 0);
2765 			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2766 		}
2767 		if (IS_ERR(lp->dma_regs)) {
2768 			dev_err(&pdev->dev, "could not map DMA regs\n");
2769 			ret = PTR_ERR(lp->dma_regs);
2770 			goto cleanup_clk;
2771 		}
2772 		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2773 			dev_err(&pdev->dev, "could not determine irqs\n");
2774 			ret = -ENOMEM;
2775 			goto cleanup_clk;
2776 		}
2777 
2778 		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2779 		ret = __axienet_device_reset(lp);
2780 		if (ret)
2781 			goto cleanup_clk;
2782 
2783 		/* Autodetect the need for 64-bit DMA pointers.
2784 		 * When the IP is configured for a bus width bigger than 32 bits,
2785 		 * writing the MSB registers is mandatory, even if they are all 0.
2786 		 * We can detect this case by writing all 1's to one such register
2787 		 * and see if that sticks: when the IP is configured for 32 bits
2788 		 * only, those registers are RES0.
2789 		 * Those MSB registers were introduced in IP v7.1, which we check first.
2790 		 */
2791 		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2792 			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2793 
2794 			iowrite32(0x0, desc);
2795 			if (ioread32(desc) == 0) {	/* sanity check */
2796 				iowrite32(0xffffffff, desc);
2797 				if (ioread32(desc) > 0) {
2798 					lp->features |= XAE_FEATURE_DMA_64BIT;
2799 					addr_width = 64;
2800 					dev_info(&pdev->dev,
2801 						 "autodetected 64-bit DMA range\n");
2802 				}
2803 				iowrite32(0x0, desc);
2804 			}
2805 		}
2806 		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2807 			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2808 			ret = -EINVAL;
2809 			goto cleanup_clk;
2810 		}
2811 
2812 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2813 		if (ret) {
2814 			dev_err(&pdev->dev, "No suitable DMA available\n");
2815 			goto cleanup_clk;
2816 		}
2817 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2818 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2819 	} else {
2820 		struct xilinx_vdma_config cfg;
2821 		struct dma_chan *tx_chan;
2822 
2823 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2824 		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2825 			ret = lp->eth_irq;
2826 			goto cleanup_clk;
2827 		}
2828 		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2829 		if (IS_ERR(tx_chan)) {
2830 			ret = PTR_ERR(tx_chan);
2831 			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2832 			goto cleanup_clk;
2833 		}
2834 
2835 		cfg.reset = 1;
2836 		/* Despite the VDMA name, this config call also supports DMA channel reset */
2837 		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2838 		if (ret < 0) {
2839 			dev_err(&pdev->dev, "Reset channel failed\n");
2840 			dma_release_channel(tx_chan);
2841 			goto cleanup_clk;
2842 		}
2843 
2844 		dma_release_channel(tx_chan);
2845 		lp->use_dmaengine = 1;
2846 	}
2847 
2848 	if (lp->use_dmaengine)
2849 		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2850 	else
2851 		ndev->netdev_ops = &axienet_netdev_ops;
2852 	/* Check for Ethernet core IRQ (optional) */
2853 	if (lp->eth_irq <= 0)
2854 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2855 
2856 	/* Retrieve the MAC address */
2857 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2858 	if (!ret) {
2859 		axienet_set_mac_address(ndev, mac_addr);
2860 	} else {
2861 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2862 			 ret);
2863 		axienet_set_mac_address(ndev, NULL);
2864 	}
2865 
2866 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2867 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2868 	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2869 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2870 
2871 	ret = axienet_mdio_setup(lp);
2872 	if (ret)
2873 		dev_warn(&pdev->dev,
2874 			 "error registering MDIO bus: %d\n", ret);
2875 
2876 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2877 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2878 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2879 		if (!np) {
2880 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2881 			 * Falling back to "phy-handle" here is only for
2882 			 * backward compatibility with old device trees.
2883 			 */
2884 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2885 		}
2886 		if (!np) {
2887 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2888 			ret = -EINVAL;
2889 			goto cleanup_mdio;
2890 		}
2891 		lp->pcs_phy = of_mdio_find_device(np);
2892 		if (!lp->pcs_phy) {
2893 			ret = -EPROBE_DEFER;
2894 			of_node_put(np);
2895 			goto cleanup_mdio;
2896 		}
2897 		of_node_put(np);
2898 		lp->pcs.ops = &axienet_pcs_ops;
2899 		lp->pcs.neg_mode = true;
2900 		lp->pcs.poll = true;
2901 	}
2902 
2903 	lp->phylink_config.dev = &ndev->dev;
2904 	lp->phylink_config.type = PHYLINK_NETDEV;
2905 	lp->phylink_config.mac_managed_pm = true;
2906 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2907 		MAC_10FD | MAC_100FD | MAC_1000FD;
2908 
2909 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2910 	if (lp->switch_x_sgmii) {
2911 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2912 			  lp->phylink_config.supported_interfaces);
2913 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2914 			  lp->phylink_config.supported_interfaces);
2915 	}
2916 
2917 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2918 				     lp->phy_mode,
2919 				     &axienet_phylink_ops);
2920 	if (IS_ERR(lp->phylink)) {
2921 		ret = PTR_ERR(lp->phylink);
2922 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2923 		goto cleanup_mdio;
2924 	}
2925 
2926 	ret = register_netdev(lp->ndev);
2927 	if (ret) {
2928 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2929 		goto cleanup_phylink;
2930 	}
2931 
2932 	return 0;
2933 
2934 cleanup_phylink:
2935 	phylink_destroy(lp->phylink);
2936 
2937 cleanup_mdio:
2938 	if (lp->pcs_phy)
2939 		put_device(&lp->pcs_phy->dev);
2940 	if (lp->mii_bus)
2941 		axienet_mdio_teardown(lp);
2942 cleanup_clk:
2943 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2944 	clk_disable_unprepare(lp->axi_clk);
2945 
2946 free_netdev:
2947 	free_netdev(ndev);
2948 
2949 	return ret;
2950 }
2951 
2952 static void axienet_remove(struct platform_device *pdev)
2953 {
2954 	struct net_device *ndev = platform_get_drvdata(pdev);
2955 	struct axienet_local *lp = netdev_priv(ndev);
2956 
2957 	unregister_netdev(ndev);
2958 
2959 	if (lp->phylink)
2960 		phylink_destroy(lp->phylink);
2961 
2962 	if (lp->pcs_phy)
2963 		put_device(&lp->pcs_phy->dev);
2964 
2965 	axienet_mdio_teardown(lp);
2966 
2967 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2968 	clk_disable_unprepare(lp->axi_clk);
2969 
2970 	free_netdev(ndev);
2971 }
2972 
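/* On shutdown, detach the netdev and close a running interface so the
 * hardware is quiesced before the system goes down.
 */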
2973 static void axienet_shutdown(struct platform_device *pdev)
2974 {
2975 	struct net_device *ndev = platform_get_drvdata(pdev);
2976 
2977 	rtnl_lock();
2978 	netif_device_detach(ndev);
2979 
2980 	if (netif_running(ndev))
2981 		dev_close(ndev);
2982 
2983 	rtnl_unlock();
2984 }
2985 
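/* Suspend/resume simply stop and reopen a running interface; the netdev is
 * detached first so the stack does not try to transmit while it is down.
 */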
2986 static int axienet_suspend(struct device *dev)
2987 {
2988 	struct net_device *ndev = dev_get_drvdata(dev);
2989 
2990 	if (!netif_running(ndev))
2991 		return 0;
2992 
2993 	netif_device_detach(ndev);
2994 
2995 	rtnl_lock();
2996 	axienet_stop(ndev);
2997 	rtnl_unlock();
2998 
2999 	return 0;
3000 }
3001 
3002 static int axienet_resume(struct device *dev)
3003 {
3004 	struct net_device *ndev = dev_get_drvdata(dev);
3005 
3006 	if (!netif_running(ndev))
3007 		return 0;
3008 
3009 	rtnl_lock();
3010 	axienet_open(ndev);
3011 	rtnl_unlock();
3012 
3013 	netif_device_attach(ndev);
3014 
3015 	return 0;
3016 }
3017 
3018 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3019 				axienet_suspend, axienet_resume);
3020 
3021 static struct platform_driver axienet_driver = {
3022 	.probe = axienet_probe,
3023 	.remove_new = axienet_remove,
3024 	.shutdown = axienet_shutdown,
3025 	.driver = {
3026 		 .name = "xilinx_axienet",
3027 		 .pm = &axienet_pm_ops,
3028 		 .of_match_table = axienet_of_match,
3029 	},
3030 };
3031 
3032 module_platform_driver(axienet_driver);
3033 
3034 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3035 MODULE_AUTHOR("Xilinx");
3036 MODULE_LICENSE("GPL");
3037