• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /**
3  * sni_ave.c - Socionext UniPhier AVE ethernet driver
4  * Copyright 2014 Panasonic Corporation
5  * Copyright 2015-2017 Socionext Inc.
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 #include <linux/mfd/syscon.h>
15 #include <linux/mii.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/of_net.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_platform.h>
21 #include <linux/phy.h>
22 #include <linux/regmap.h>
23 #include <linux/reset.h>
24 #include <linux/types.h>
25 #include <linux/u64_stats_sync.h>
26 
27 /* General Register Group */
28 #define AVE_IDR			0x000	/* ID */
29 #define AVE_VR			0x004	/* Version */
30 #define AVE_GRR			0x008	/* Global Reset */
31 #define AVE_CFGR		0x00c	/* Configuration */
32 
33 /* Interrupt Register Group */
34 #define AVE_GIMR		0x100	/* Global Interrupt Mask */
35 #define AVE_GISR		0x104	/* Global Interrupt Status */
36 
37 /* MAC Register Group */
38 #define AVE_TXCR		0x200	/* TX Setup */
39 #define AVE_RXCR		0x204	/* RX Setup */
40 #define AVE_RXMAC1R		0x208	/* MAC address (lower) */
41 #define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
42 #define AVE_MDIOCTR		0x214	/* MDIO Control */
43 #define AVE_MDIOAR		0x218	/* MDIO Address */
44 #define AVE_MDIOWDR		0x21c	/* MDIO Data */
45 #define AVE_MDIOSR		0x220	/* MDIO Status */
46 #define AVE_MDIORDR		0x224	/* MDIO Rd Data */
47 
48 /* Descriptor Control Register Group */
49 #define AVE_DESCC		0x300	/* Descriptor Control */
50 #define AVE_TXDC		0x304	/* TX Descriptor Configuration */
51 #define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
52 #define AVE_IIRQC		0x34c	/* Interval IRQ Control */
53 
54 /* Packet Filter Register Group */
55 #define AVE_PKTF_BASE		0x800	/* PF Base Address */
56 #define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
57 #define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
58 #define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
59 #define AVE_PFEN		0xffc	/* Packet Filter Enable */
60 #define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
61 #define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
62 #define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
63 #define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
64 
65 /* 64bit descriptor memory */
66 #define AVE_DESC_SIZE_64	12	/* Descriptor Size */
67 
68 #define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
69 #define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
70 
71 #define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
72 #define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
73 
74 /* 32bit descriptor memory */
75 #define AVE_DESC_SIZE_32	8	/* Descriptor Size */
76 
77 #define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
78 #define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
79 
80 #define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
81 #define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
82 
83 /* RMII Bridge Register Group */
84 #define AVE_RSTCTRL		0x8028	/* Reset control */
85 #define AVE_RSTCTRL_RMIIRST	BIT(16)
86 #define AVE_LINKSEL		0x8034	/* Link speed setting */
87 #define AVE_LINKSEL_100M	BIT(0)
88 
89 /* AVE_GRR */
90 #define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
91 #define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
92 #define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
93 
94 /* AVE_CFGR */
95 #define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
96 #define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
97 #define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
98 #define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
99 
100 /* AVE_GISR (common with GIMR) */
101 #define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
102 #define AVE_GI_TX		BIT(16)	/* Tx complete */
103 #define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
104 #define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
105 #define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
106 #define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
107 
108 /* AVE_TXCR */
109 #define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
110 #define AVE_TXCR_TXSPD_1G	BIT(17)
111 #define AVE_TXCR_TXSPD_100	BIT(16)
112 
113 /* AVE_RXCR */
114 #define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
115 #define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
116 #define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
117 #define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
118 #define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
119 #define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
120 
121 /* AVE_MDIOCTR */
122 #define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
123 #define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
124 
125 /* AVE_MDIOSR */
126 #define AVE_MDIOSR_STS		BIT(0)	/* access status */
127 
128 /* AVE_DESCC */
129 #define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
130 #define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
131 #define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
132 #define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
133 
134 /* AVE_TXDC */
135 #define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
136 #define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
137 #define AVE_TXDC_ADDR_START	0
138 
139 /* AVE_RXDC0 */
140 #define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
141 #define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
142 #define AVE_RXDC0_ADDR_START	0
143 
144 /* AVE_IIRQC */
145 #define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
146 #define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
147 
148 /* Command status for descriptor */
149 #define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
150 #define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
151 #define AVE_STS_OK		BIT(27)	/* Normal transmit */
152 /* TX */
153 #define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
154 #define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
155 #define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
156 #define AVE_STS_OWC		BIT(21)	/* Out of window,Late Collision */
157 #define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
158 #define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
159 /* RX */
160 #define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
161 #define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
162 #define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
163 
164 /* Packet filter */
165 #define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
166 #define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
167 #define AVE_PFMBIT_MASK		GENMASK(15, 0)
168 
169 #define AVE_PF_SIZE		17	/* Number of all packet filter */
170 #define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter */
171 
172 #define AVE_PFNUM_FILTER	0	/* No.0 */
173 #define AVE_PFNUM_UNICAST	1	/* No.1 */
174 #define AVE_PFNUM_BROADCAST	2	/* No.2 */
175 #define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
176 
177 /* NETIF Message control */
178 #define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
179 				 NETIF_MSG_PROBE  |	\
180 				 NETIF_MSG_LINK   |	\
181 				 NETIF_MSG_TIMER  |	\
182 				 NETIF_MSG_IFDOWN |	\
183 				 NETIF_MSG_IFUP   |	\
184 				 NETIF_MSG_RX_ERR |	\
185 				 NETIF_MSG_TX_ERR)
186 
187 /* Parameter for descriptor */
188 #define AVE_NR_TXDESC		32	/* Tx descriptor */
189 #define AVE_NR_RXDESC		64	/* Rx descriptor */
190 
191 #define AVE_DESC_OFS_CMDSTS	0
192 #define AVE_DESC_OFS_ADDRL	4
193 #define AVE_DESC_OFS_ADDRU	8
194 
195 /* Parameter for ethernet frame */
196 #define AVE_MAX_ETHFRAME	1518
197 #define AVE_FRAME_HEADROOM	2
198 
199 /* Parameter for interrupt */
200 #define AVE_INTM_COUNT		20
201 #define AVE_FORCE_TXINTCNT	1
202 
203 /* SG */
204 #define SG_ETPINMODE		0x540
205 #define SG_ETPINMODE_EXTPHY	BIT(1)	/* for LD11 */
206 #define SG_ETPINMODE_RMII(ins)	BIT(ins)
207 
208 #define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
209 
210 #define AVE_MAX_CLKS		4
211 #define AVE_MAX_RSTS		2
212 
213 enum desc_id {
214 	AVE_DESCID_RX,
215 	AVE_DESCID_TX,
216 };
217 
218 enum desc_state {
219 	AVE_DESC_RX_PERMIT,
220 	AVE_DESC_RX_SUSPEND,
221 	AVE_DESC_START,
222 	AVE_DESC_STOP,
223 };
224 
225 struct ave_desc {
226 	struct sk_buff	*skbs;
227 	dma_addr_t	skbs_dma;
228 	size_t		skbs_dmalen;
229 };
230 
231 struct ave_desc_info {
232 	u32	ndesc;		/* number of descriptor */
233 	u32	daddr;		/* start address of descriptor */
234 	u32	proc_idx;	/* index of processing packet */
235 	u32	done_idx;	/* index of processed packet */
236 	struct ave_desc *desc;	/* skb info related descriptor */
237 };
238 
239 struct ave_stats {
240 	struct	u64_stats_sync	syncp;
241 	u64	packets;
242 	u64	bytes;
243 	u64	errors;
244 	u64	dropped;
245 	u64	collisions;
246 	u64	fifo_errors;
247 };
248 
249 struct ave_private {
250 	void __iomem            *base;
251 	int                     irq;
252 	int			phy_id;
253 	unsigned int		desc_size;
254 	u32			msg_enable;
255 	int			nclks;
256 	struct clk		*clk[AVE_MAX_CLKS];
257 	int			nrsts;
258 	struct reset_control	*rst[AVE_MAX_RSTS];
259 	phy_interface_t		phy_mode;
260 	struct phy_device	*phydev;
261 	struct mii_bus		*mdio;
262 	struct regmap		*regmap;
263 	unsigned int		pinmode_mask;
264 	unsigned int		pinmode_val;
265 
266 	/* stats */
267 	struct ave_stats	stats_rx;
268 	struct ave_stats	stats_tx;
269 
270 	/* NAPI support */
271 	struct net_device	*ndev;
272 	struct napi_struct	napi_rx;
273 	struct napi_struct	napi_tx;
274 
275 	/* descriptor */
276 	struct ave_desc_info	rx;
277 	struct ave_desc_info	tx;
278 
279 	/* flow control */
280 	int pause_auto;
281 	int pause_rx;
282 	int pause_tx;
283 
284 	const struct ave_soc_data *data;
285 };
286 
287 struct ave_soc_data {
288 	bool	is_desc_64bit;
289 	const char	*clock_names[AVE_MAX_CLKS];
290 	const char	*reset_names[AVE_MAX_RSTS];
291 	int	(*get_pinmode)(struct ave_private *priv,
292 			       phy_interface_t phy_mode, u32 arg);
293 };
294 
ave_desc_read(struct net_device * ndev,enum desc_id id,int entry,int offset)295 static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
296 			 int offset)
297 {
298 	struct ave_private *priv = netdev_priv(ndev);
299 	u32 addr;
300 
301 	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
302 		+ entry * priv->desc_size + offset;
303 
304 	return readl(priv->base + addr);
305 }
306 
/* Read the command/status word of descriptor @entry in ring @id. */
static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
				int entry)
{
	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
}
312 
/* Write one 32-bit word at @offset of descriptor @entry in ring @id. */
static void ave_desc_write(struct net_device *ndev, enum desc_id id,
			   int entry, int offset, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 base;

	/* select the Tx or Rx descriptor memory region */
	base = (id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr;

	writel(val, priv->base + base + entry * priv->desc_size + offset);
}
324 
/* Write the command/status word of descriptor @entry in ring @id. */
static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
				  int entry, u32 val)
{
	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
}
330 
/* Store the DMA buffer address into descriptor @entry of ring @id.
 * Only the 64-bit descriptor layout has an upper-address word.
 */
static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
				int entry, dma_addr_t paddr)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 addr_lo = lower_32_bits(paddr);

	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, addr_lo);

	if (IS_DESC_64BIT(priv))
		ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRU,
			       upper_32_bits(paddr));
}
343 
ave_irq_disable_all(struct net_device * ndev)344 static u32 ave_irq_disable_all(struct net_device *ndev)
345 {
346 	struct ave_private *priv = netdev_priv(ndev);
347 	u32 ret;
348 
349 	ret = readl(priv->base + AVE_GIMR);
350 	writel(0, priv->base + AVE_GIMR);
351 
352 	return ret;
353 }
354 
/* Restore a global interrupt mask previously saved by
 * ave_irq_disable_all().
 */
static void ave_irq_restore(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(val, priv->base + AVE_GIMR);
}
361 
/* Unmask the interrupts in @bitflag and negate any status already
 * pending for those bits (GISR is write-to-clear, see the reset paths).
 */
static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
	writel(bitflag, priv->base + AVE_GISR);
}
369 
ave_hw_write_macaddr(struct net_device * ndev,const unsigned char * mac_addr,int reg1,int reg2)370 static void ave_hw_write_macaddr(struct net_device *ndev,
371 				 const unsigned char *mac_addr,
372 				 int reg1, int reg2)
373 {
374 	struct ave_private *priv = netdev_priv(ndev);
375 
376 	writel(mac_addr[0] | mac_addr[1] << 8 |
377 	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
378 	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
379 }
380 
ave_hw_read_version(struct net_device * ndev,char * buf,int len)381 static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
382 {
383 	struct ave_private *priv = netdev_priv(ndev);
384 	u32 major, minor, vr;
385 
386 	vr = readl(priv->base + AVE_VR);
387 	major = (vr & GENMASK(15, 8)) >> 8;
388 	minor = (vr & GENMASK(7, 0));
389 	snprintf(buf, len, "v%u.%u", major, minor);
390 }
391 
/* ethtool .get_drvinfo: report driver name, bus info and the hardware
 * version (read from AVE_VR) in the firmware-version field.
 */
static void ave_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
	struct device *dev = ndev->dev.parent;

	strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
401 
/* ethtool .get_msglevel: return the current NETIF_MSG_* bitmap. */
static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
408 
/* ethtool .set_msglevel: set the NETIF_MSG_* bitmap. */
static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}
415 
ave_ethtool_get_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)416 static void ave_ethtool_get_wol(struct net_device *ndev,
417 				struct ethtool_wolinfo *wol)
418 {
419 	wol->supported = 0;
420 	wol->wolopts   = 0;
421 
422 	if (ndev->phydev)
423 		phy_ethtool_get_wol(ndev->phydev, wol);
424 }
425 
ave_ethtool_set_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)426 static int ave_ethtool_set_wol(struct net_device *ndev,
427 			       struct ethtool_wolinfo *wol)
428 {
429 	int ret;
430 
431 	if (!ndev->phydev ||
432 	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
433 		return -EOPNOTSUPP;
434 
435 	ret = phy_ethtool_set_wol(ndev->phydev, wol);
436 	if (!ret)
437 		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
438 
439 	return ret;
440 }
441 
/* ethtool .get_pauseparam: report the cached flow-control settings. */
static void ave_ethtool_get_pauseparam(struct net_device *ndev,
				       struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);

	pause->autoneg  = priv->pause_auto;
	pause->rx_pause = priv->pause_rx;
	pause->tx_pause = priv->pause_tx;
}
451 
/* ethtool .set_pauseparam: cache the requested flow-control settings
 * and translate them into the PHY's Pause/Asym_Pause advertisement.
 *
 * The set/XOR pair below encodes (rx, tx) per 802.3 pause resolution:
 *   rx=0 tx=0 -> none;  rx=1 tx=0 -> Pause|Asym;  rx=0 tx=1 -> Asym;
 *   rx=1 tx=1 -> Pause.
 */
static int ave_ethtool_set_pauseparam(struct net_device *ndev,
				      struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -EINVAL;

	priv->pause_auto = pause->autoneg;
	priv->pause_rx   = pause->rx_pause;
	priv->pause_tx   = pause->tx_pause;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (pause->rx_pause)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (pause->tx_pause)
		phydev->advertising ^= ADVERTISED_Asym_Pause;

	/* re-negotiate so the new advertisement takes effect */
	if (pause->autoneg) {
		if (netif_running(ndev))
			phy_start_aneg(phydev);
	}

	return 0;
}
478 
/* ethtool operations; link settings and nway reset are delegated to
 * the generic phylib helpers.
 */
static const struct ethtool_ops ave_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_drvinfo		= ave_ethtool_get_drvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= ave_ethtool_get_msglevel,
	.set_msglevel		= ave_ethtool_set_msglevel,
	.get_wol		= ave_ethtool_get_wol,
	.set_wol		= ave_ethtool_set_wol,
	.get_pauseparam         = ave_ethtool_get_pauseparam,
	.set_pauseparam         = ave_ethtool_set_pauseparam,
};
492 
/* MDIO bus read op.
 *
 * Latches the PHY/register address into AVE_MDIOAR, raises the read
 * request bit in AVE_MDIOCTR and polls AVE_MDIOSR until the access
 * completes.  Returns the 16-bit register value, or a negative errno
 * on timeout.
 */
static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* read request: set RREQ and make sure WREQ is clear */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
	       priv->base + AVE_MDIOCTR);

	/* wait for the status bit to clear (access finished) */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret) {
		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
			   phyid, regnum);
		return ret;
	}

	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
}
520 
/* MDIO bus write op.
 *
 * Latches the PHY/register address and the data word, raises the write
 * request bit in AVE_MDIOCTR and polls AVE_MDIOSR until the access
 * completes.  Returns 0 on success or a negative errno on timeout.
 */
static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
			     u16 val)
{
	struct net_device *ndev = bus->priv;
	struct ave_private *priv;
	u32 mdioctl, mdiosr;
	int ret;

	priv = netdev_priv(ndev);

	/* write address */
	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);

	/* write data */
	writel(val, priv->base + AVE_MDIOWDR);

	/* write request: set WREQ and make sure RREQ is clear */
	mdioctl = readl(priv->base + AVE_MDIOCTR);
	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
	       priv->base + AVE_MDIOCTR);

	/* wait for the status bit to clear (access finished) */
	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
	if (ret)
		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
			   phyid, regnum);

	return ret;
}
550 
/* DMA-map @ptr/@len for @dir and record the mapping in @desc so it can
 * be released later by ave_dma_unmap().  The mapped address is also
 * returned through @paddr.  Returns 0 on success, -ENOMEM on mapping
 * failure.
 */
static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
		       void *ptr, size_t len, enum dma_data_direction dir,
		       dma_addr_t *paddr)
{
	struct device *dev = ndev->dev.parent;
	dma_addr_t addr;

	addr = dma_map_single(dev, ptr, len, dir);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	desc->skbs_dma = addr;
	desc->skbs_dmalen = len;
	*paddr = addr;

	return 0;
}
567 
ave_dma_unmap(struct net_device * ndev,struct ave_desc * desc,enum dma_data_direction dir)568 static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
569 			  enum dma_data_direction dir)
570 {
571 	if (!desc->skbs_dma)
572 		return;
573 
574 	dma_unmap_single(ndev->dev.parent,
575 			 desc->skbs_dma, desc->skbs_dmalen, dir);
576 	desc->skbs_dma = 0;
577 }
578 
/* Prepare Rx descriptor and memory.
 *
 * (Re)attach a receive buffer to Rx descriptor @entry: allocate an skb
 * if the slot has none, DMA-map it, and hand the descriptor to the
 * hardware.  Returns 0 on success or a negative errno.
 */
static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = priv->rx.desc[entry].skbs;
	if (!skb) {
		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
		if (!skb) {
			netdev_err(ndev, "can't allocate skb for Rx\n");
			return -ENOMEM;
		}
		/* reserve headroom manually (see the mapping comment below) */
		skb->data += AVE_FRAME_HEADROOM;
		skb->tail += AVE_FRAME_HEADROOM;
	}

	/* set disable to cmdsts */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_STS_OWN);

	/* map Rx buffer
	 * Rx buffer set to the Rx descriptor has two restrictions:
	 * - Rx buffer address is 4 byte aligned.
	 * - Rx buffer begins with 2 byte headroom, and data will be put from
	 *   (buffer + 2).
	 * To satisfy this, specify the address to put back the buffer
	 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
	 * by AVE_FRAME_HEADROOM.
	 */
	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
			  skb->data - AVE_FRAME_HEADROOM,
			  AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
			  DMA_FROM_DEVICE, &paddr);
	if (ret) {
		netdev_err(ndev, "can't map skb for Rx\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	priv->rx.desc[entry].skbs = skb;

	/* set buffer pointer */
	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);

	/* set enable to cmdsts */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_MAX_ETHFRAME);

	return ret;
}
631 
/* Switch state of descriptor.
 *
 * Drive the descriptor controller (AVE_DESCC) into the requested
 * state: start/stop both rings, or suspend/resume Rx ring 0.  The
 * stop and suspend transitions poll the controller until the hardware
 * acknowledges.  Returns 0 on success, -EBUSY on poll timeout,
 * -EINVAL for an unknown state.
 */
static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
{
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;
	u32 val;

	switch (state) {
	case AVE_DESC_START:
		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
		break;

	case AVE_DESC_STOP:
		writel(0, priv->base + AVE_DESCC);
		/* all bits (including status) must drop to zero */
		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
				       150, 15000)) {
			netdev_err(ndev, "can't stop descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_SUSPEND:
		val = readl(priv->base + AVE_DESCC);
		val |= AVE_DESCC_RDSTP;
		/* don't write back the read-only status half */
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		/* wait for the suspend status bit (RDSTP mirrored at +16) */
		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
				       val & (AVE_DESCC_RDSTP << 16),
				       150, 150000)) {
			netdev_err(ndev, "can't suspend descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_PERMIT:
		val = readl(priv->base + AVE_DESCC);
		val &= ~AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
680 
/* Reclaim completed Tx descriptors.
 *
 * Walk the Tx ring from done_idx towards proc_idx, stop at the first
 * descriptor still owned by hardware, free the associated skbs and
 * accumulate Tx statistics.  Returns the number of buffers released.
 */
static int ave_tx_complete(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	unsigned int nr_freebuf = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc    = priv->tx.ndesc;

	/* free pre-stored skb from done_idx to proc_idx */
	while (proc_idx != done_idx) {
		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);

		/* do nothing if owner is HW (==1 for Tx) */
		if (cmdsts & AVE_STS_OWN)
			break;

		/* check Tx status and updates statistics */
		if (cmdsts & AVE_STS_OK) {
			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
			/* success: count a packet on its last fragment */
			if (cmdsts & AVE_STS_LAST)
				tx_packets++;
		} else {
			/* error: also counted on the last fragment only */
			if (cmdsts & AVE_STS_LAST) {
				priv->stats_tx.errors++;
				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
					priv->stats_tx.collisions++;
			}
		}

		/* release skb */
		if (priv->tx.desc[done_idx].skbs) {
			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
				      DMA_TO_DEVICE);
			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
			priv->tx.desc[done_idx].skbs = NULL;
			nr_freebuf++;
		}
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->tx.done_idx = done_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_tx.syncp);
	priv->stats_tx.packets += tx_packets;
	priv->stats_tx.bytes   += tx_bytes;
	u64_stats_update_end(&priv->stats_tx.syncp);

	/* wake queue for freeing buffer */
	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
		netif_wake_queue(ndev);

	return nr_freebuf;
}
741 
/* Receive up to @num packets from the Rx ring.
 *
 * Walk the ring from proc_idx, pass completed frames up the stack,
 * update Rx statistics, then refill the consumed slots from done_idx.
 * Returns the number of packets processed (may be less than @num).
 */
static int ave_rx_receive(struct net_device *ndev, int num)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int rx_packets = 0;
	unsigned int rx_bytes = 0;
	u32 proc_idx, done_idx;
	struct sk_buff *skb;
	unsigned int pktlen;
	int restpkt, npkts;
	u32 ndesc, cmdsts;

	proc_idx = priv->rx.proc_idx;
	done_idx = priv->rx.done_idx;
	ndesc    = priv->rx.ndesc;
	/* number of slots still available ahead of proc_idx */
	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;

	for (npkts = 0; npkts < num; npkts++) {
		/* we can't receive more packet, so fill desc quickly */
		if (--restpkt < 0)
			break;

		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);

		/* do nothing if owner is HW (==0 for Rx) */
		if (!(cmdsts & AVE_STS_OWN))
			break;

		if (!(cmdsts & AVE_STS_OK)) {
			priv->stats_rx.errors++;
			proc_idx = (proc_idx + 1) % ndesc;
			continue;
		}

		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;

		/* get skbuff for rx */
		skb = priv->rx.desc[proc_idx].skbs;
		priv->rx.desc[proc_idx].skbs = NULL;

		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);

		skb->dev = ndev;
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);

		/* checksum verified by hardware and no error detected */
		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_packets++;
		rx_bytes += pktlen;

		netif_receive_skb(skb);

		proc_idx = (proc_idx + 1) % ndesc;
	}

	priv->rx.proc_idx = proc_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets += rx_packets;
	priv->stats_rx.bytes   += rx_bytes;
	u64_stats_update_end(&priv->stats_rx.syncp);

	/* refill the Rx buffers */
	while (proc_idx != done_idx) {
		if (ave_rxdesc_prepare(ndev, done_idx))
			break;
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->rx.done_idx = done_idx;

	return npkts;
}
817 
ave_napi_poll_rx(struct napi_struct * napi,int budget)818 static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
819 {
820 	struct ave_private *priv;
821 	struct net_device *ndev;
822 	int num;
823 
824 	priv = container_of(napi, struct ave_private, napi_rx);
825 	ndev = priv->ndev;
826 
827 	num = ave_rx_receive(ndev, budget);
828 	if (num < budget) {
829 		napi_complete_done(napi, num);
830 
831 		/* enable Rx interrupt when NAPI finishes */
832 		ave_irq_enable(ndev, AVE_GI_RXIINT);
833 	}
834 
835 	return num;
836 }
837 
ave_napi_poll_tx(struct napi_struct * napi,int budget)838 static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
839 {
840 	struct ave_private *priv;
841 	struct net_device *ndev;
842 	int num;
843 
844 	priv = container_of(napi, struct ave_private, napi_tx);
845 	ndev = priv->ndev;
846 
847 	num = ave_tx_complete(ndev);
848 	napi_complete(napi);
849 
850 	/* enable Tx interrupt when NAPI finishes */
851 	ave_irq_enable(ndev, AVE_GI_TX);
852 
853 	return num;
854 }
855 
/* Perform a full MAC + PHY reset sequence.
 *
 * Programs the configuration register for the current PHY mode, cycles
 * the RMII bridge and global reset bits with the required delays, and
 * leaves all interrupts masked.  The msleep() delays are part of the
 * hardware reset timing — do not reorder or shorten them.
 */
static void ave_global_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	/* set config register */
	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
		val |= AVE_CFGR_MII;
	writel(val, priv->base + AVE_CFGR);

	/* reset RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val &= ~AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* assert reset */
	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
	msleep(20);

	/* 1st, negate PHY reset only */
	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
	msleep(40);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	msleep(40);

	/* negate RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val |= AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	ave_irq_disable_all(ndev);
}
891 
/* Recover from an RxFIFO overflow by resetting the FIFO.
 *
 * MAC reception is disabled and the Rx descriptor ring suspended while
 * the FIFO reset bit is pulsed; pending packets are drained first so
 * none are lost.  The original RXCR value is restored at the end.
 */
static void ave_rxfifo_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 rxcr_org;

	/* save and disable MAC receive op */
	rxcr_org = readl(priv->base + AVE_RXCR);
	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);

	/* suspend Rx descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);

	/* receive all packets before descriptor starts */
	ave_rx_receive(ndev, priv->rx.ndesc);

	/* assert reset */
	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
	udelay(50);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	udelay(20);

	/* negate interrupt status */
	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);

	/* permit descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);

	/* restore MAC receive op */
	writel(rxcr_org, priv->base + AVE_RXCR);
}
924 
/* Top-half interrupt handler.
 *
 * Masks all interrupts, acknowledges PHY and oversize-frame events
 * unconditionally, then dispatches the enabled sources: RxFIFO
 * overflow is handled inline, Rx/Tx completion is deferred to NAPI
 * with the corresponding source left masked until the poller finishes.
 */
static irqreturn_t ave_irq_handler(int irq, void *netdev)
{
	struct net_device *ndev = (struct net_device *)netdev;
	struct ave_private *priv = netdev_priv(ndev);
	u32 gimr_val, gisr_val;

	gimr_val = ave_irq_disable_all(ndev);

	/* get interrupt status */
	gisr_val = readl(priv->base + AVE_GISR);

	/* PHY */
	if (gisr_val & AVE_GI_PHY)
		writel(AVE_GI_PHY, priv->base + AVE_GISR);

	/* check exceeding packet */
	if (gisr_val & AVE_GI_RXERR) {
		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
		netdev_err(ndev, "receive a packet exceeding frame buffer\n");
	}

	/* only handle sources that were actually enabled */
	gisr_val &= gimr_val;
	if (!gisr_val)
		goto exit_isr;

	/* RxFIFO overflow */
	if (gisr_val & AVE_GI_RXOVF) {
		priv->stats_rx.fifo_errors++;
		ave_rxfifo_reset(ndev);
		goto exit_isr;
	}

	/* Rx drop */
	if (gisr_val & AVE_GI_RXDROP) {
		priv->stats_rx.dropped++;
		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
	}

	/* Rx interval */
	if (gisr_val & AVE_GI_RXIINT) {
		napi_schedule(&priv->napi_rx);
		/* still force to disable Rx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_RXIINT;
	}

	/* Tx completed */
	if (gisr_val & AVE_GI_TX) {
		napi_schedule(&priv->napi_tx);
		/* still force to disable Tx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_TX;
	}

exit_isr:
	ave_irq_restore(ndev, gimr_val);

	return IRQ_HANDLED;
}
982 
ave_pfsel_start(struct net_device * ndev,unsigned int entry)983 static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
984 {
985 	struct ave_private *priv = netdev_priv(ndev);
986 	u32 val;
987 
988 	if (WARN_ON(entry > AVE_PF_SIZE))
989 		return -EINVAL;
990 
991 	val = readl(priv->base + AVE_PFEN);
992 	writel(val | BIT(entry), priv->base + AVE_PFEN);
993 
994 	return 0;
995 }
996 
ave_pfsel_stop(struct net_device * ndev,unsigned int entry)997 static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
998 {
999 	struct ave_private *priv = netdev_priv(ndev);
1000 	u32 val;
1001 
1002 	if (WARN_ON(entry > AVE_PF_SIZE))
1003 		return -EINVAL;
1004 
1005 	val = readl(priv->base + AVE_PFEN);
1006 	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
1007 
1008 	return 0;
1009 }
1010 
ave_pfsel_set_macaddr(struct net_device * ndev,unsigned int entry,const unsigned char * mac_addr,unsigned int set_size)1011 static int ave_pfsel_set_macaddr(struct net_device *ndev,
1012 				 unsigned int entry,
1013 				 const unsigned char *mac_addr,
1014 				 unsigned int set_size)
1015 {
1016 	struct ave_private *priv = netdev_priv(ndev);
1017 
1018 	if (WARN_ON(entry > AVE_PF_SIZE))
1019 		return -EINVAL;
1020 	if (WARN_ON(set_size > 6))
1021 		return -EINVAL;
1022 
1023 	ave_pfsel_stop(ndev, entry);
1024 
1025 	/* set MAC address for the filter */
1026 	ave_hw_write_macaddr(ndev, mac_addr,
1027 			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
1028 
1029 	/* set byte mask */
1030 	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
1031 	       priv->base + AVE_PFMBYTE(entry));
1032 	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1033 
1034 	/* set bit mask filter */
1035 	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1036 
1037 	/* set selector to ring 0 */
1038 	writel(0, priv->base + AVE_PFSEL(entry));
1039 
1040 	/* restart filter */
1041 	ave_pfsel_start(ndev, entry);
1042 
1043 	return 0;
1044 }
1045 
/* Program packet filter @entry to accept all frames (every byte masked
 * out of the comparison) and route them to Rx ring @rxring.
 */
static void ave_pfsel_set_promisc(struct net_device *ndev,
				  unsigned int entry, u32 rxring)
{
	struct ave_private *priv = netdev_priv(ndev);

	/* valid entries are 0 .. AVE_PF_SIZE - 1; '>=' fixes the former
	 * off-by-one that accepted entry == AVE_PF_SIZE
	 */
	if (WARN_ON(entry >= AVE_PF_SIZE))
		return;

	/* keep the entry disabled while it is being reprogrammed */
	ave_pfsel_stop(ndev, entry);

	/* set byte mask: mask every byte so any frame matches */
	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);

	/* set bit mask filter */
	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));

	/* set selector to rxring */
	writel(rxring, priv->base + AVE_PFSEL(entry));

	ave_pfsel_start(ndev, entry);
}
1068 
ave_pfsel_init(struct net_device * ndev)1069 static void ave_pfsel_init(struct net_device *ndev)
1070 {
1071 	unsigned char bcast_mac[ETH_ALEN];
1072 	int i;
1073 
1074 	eth_broadcast_addr(bcast_mac);
1075 
1076 	for (i = 0; i < AVE_PF_SIZE; i++)
1077 		ave_pfsel_stop(ndev, i);
1078 
1079 	/* promiscious entry, select ring 0 */
1080 	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1081 
1082 	/* unicast entry */
1083 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1084 
1085 	/* broadcast entry */
1086 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1087 }
1088 
/* phylib adjust_link callback: propagate the negotiated speed, duplex
 * and pause settings into the MAC's TXCR/RXCR/LINKSEL registers.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	/* 1G is only selected on RGMII; otherwise 100M or the 10M default */
	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve Tx/Rx flow control from local and link-partner
		 * pause advertisement
		 */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;
		if (phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no flow control */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	if (rxcr_org != rxcr) {
		/* disable Rx mac before reprogramming, then re-enable */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1160 
/* Program the current device MAC address into the Rx MAC registers and
 * refresh the unicast packet filter entry to match it.
 */
static void ave_macaddr_init(struct net_device *ndev)
{
	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);

	/* pfsel unicast entry */
	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
}
1168 
/* ndo_init: power up the hardware and attach the PHY.  Clocks and
 * resets stay enabled from here until ndo_uninit so register access
 * works even before ndo_open is called.
 */
static int ave_init(struct net_device *ndev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ave_private *priv = netdev_priv(ndev);
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	struct device_node *mdio_np;
	struct phy_device *phydev;
	int nc, nr, ret;

	/* enable clk because of hw access until ndo_open */
	for (nc = 0; nc < priv->nclks; nc++) {
		ret = clk_prepare_enable(priv->clk[nc]);
		if (ret) {
			dev_err(dev, "can't enable clock\n");
			goto out_clk_disable;
		}
	}

	for (nr = 0; nr < priv->nrsts; nr++) {
		ret = reset_control_deassert(priv->rst[nr]);
		if (ret) {
			dev_err(dev, "can't deassert reset\n");
			goto out_reset_assert;
		}
	}

	/* apply the pinmode bits computed by get_pinmode() at probe time */
	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
				 priv->pinmode_mask, priv->pinmode_val);
	if (ret)
		goto out_reset_assert;

	ave_global_reset(ndev);

	mdio_np = of_get_child_by_name(np, "mdio");
	if (!mdio_np) {
		dev_err(dev, "mdio node not found\n");
		ret = -EINVAL;
		goto out_reset_assert;
	}
	ret = of_mdiobus_register(priv->mdio, mdio_np);
	of_node_put(mdio_np);
	if (ret) {
		dev_err(dev, "failed to register mdiobus\n");
		goto out_reset_assert;
	}

	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
	if (!phydev) {
		dev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_mdio_unregister;
	}

	priv->phydev = phydev;

	phy_ethtool_get_wol(phydev, &wol);
	device_set_wakeup_capable(&ndev->dev, !!wol.supported);

	/* non-RGMII modes cannot do gigabit; restrict to 10/100 */
	if (!phy_interface_is_rgmii(phydev)) {
		phydev->supported &= ~PHY_GBIT_FEATURES;
		phydev->supported |= PHY_BASIC_FEATURES;
	}
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	phy_attached_info(phydev);

	return 0;

out_mdio_unregister:
	mdiobus_unregister(priv->mdio);
out_reset_assert:
	/* unwind only the resets/clocks that were actually enabled */
	while (--nr >= 0)
		reset_control_assert(priv->rst[nr]);
out_clk_disable:
	while (--nc >= 0)
		clk_disable_unprepare(priv->clk[nc]);

	return ret;
}
1249 
ave_uninit(struct net_device * ndev)1250 static void ave_uninit(struct net_device *ndev)
1251 {
1252 	struct ave_private *priv = netdev_priv(ndev);
1253 	int i;
1254 
1255 	phy_disconnect(priv->phydev);
1256 	mdiobus_unregister(priv->mdio);
1257 
1258 	/* disable clk because of hw access after ndo_stop */
1259 	for (i = 0; i < priv->nrsts; i++)
1260 		reset_control_assert(priv->rst[i]);
1261 	for (i = 0; i < priv->nclks; i++)
1262 		clk_disable_unprepare(priv->clk[i]);
1263 }
1264 
/* ndo_open: allocate and program the Tx/Rx descriptor rings, configure
 * the MAC and interrupt sources, then start NAPI, the PHY and the
 * transmit queue.
 */
static int ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	/* ring size in bytes is placed in the upper half of AVE_TXDC */
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask the interrupt sources this driver handles */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}
1352 
ave_stop(struct net_device * ndev)1353 static int ave_stop(struct net_device *ndev)
1354 {
1355 	struct ave_private *priv = netdev_priv(ndev);
1356 	int entry;
1357 
1358 	ave_irq_disable_all(ndev);
1359 	disable_irq(priv->irq);
1360 	free_irq(priv->irq, ndev);
1361 
1362 	netif_tx_disable(ndev);
1363 	phy_stop(ndev->phydev);
1364 	napi_disable(&priv->napi_tx);
1365 	napi_disable(&priv->napi_rx);
1366 
1367 	ave_desc_switch(ndev, AVE_DESC_STOP);
1368 
1369 	/* free Tx buffer */
1370 	for (entry = 0; entry < priv->tx.ndesc; entry++) {
1371 		if (!priv->tx.desc[entry].skbs)
1372 			continue;
1373 
1374 		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
1375 		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
1376 		priv->tx.desc[entry].skbs = NULL;
1377 	}
1378 	priv->tx.proc_idx = 0;
1379 	priv->tx.done_idx = 0;
1380 
1381 	/* free Rx buffer */
1382 	for (entry = 0; entry < priv->rx.ndesc; entry++) {
1383 		if (!priv->rx.desc[entry].skbs)
1384 			continue;
1385 
1386 		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
1387 		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
1388 		priv->rx.desc[entry].skbs = NULL;
1389 	}
1390 	priv->rx.proc_idx = 0;
1391 	priv->rx.done_idx = 0;
1392 
1393 	kfree(priv->tx.desc);
1394 	kfree(priv->rx.desc);
1395 
1396 	return 0;
1397 }
1398 
/* ndo_start_xmit: map the skb, fill one Tx descriptor and hand it to
 * the hardware by writing cmdsts with the OWN bit last.
 */
static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free slots in the ring; one slot is kept unused so a full ring
	 * can be told apart from an empty one
	 */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		(skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable checksum offload when the skb doesn't need a checksum */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	/* cmdsts (containing OWN) is written after the buffer address */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}
1456 
ave_ioctl(struct net_device * ndev,struct ifreq * ifr,int cmd)1457 static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1458 {
1459 	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1460 }
1461 
/* first byte of the IPv4 (01:...) and IPv6 (33:...) multicast MAC
 * prefixes; only the first byte is compared because ave_set_rx_mode()
 * installs these with set_size = 1
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1464 
/* ndo_set_rx_mode: reprogram the MAC address filter enable bit and the
 * multicast packet filter entries from ndev->flags and the mc list.
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* MAC addr filter enable for promiscuous mode */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		/* too many addresses for per-address entries: match any
		 * IPv4/IPv6 multicast by comparing only the first byte
		 */
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}
1503 
/* ndo_get_stats64: copy the driver's software counters into @stats.
 * packets/bytes are read under the u64_stats retry loop so a 32-bit
 * reader never observes a torn 64-bit value; the remaining counters
 * are read directly.
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes	  = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes	  = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));

	stats->rx_errors      = priv->stats_rx.errors;
	stats->tx_errors      = priv->stats_tx.errors;
	stats->rx_dropped     = priv->stats_rx.dropped;
	stats->tx_dropped     = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
	stats->collisions     = priv->stats_tx.collisions;
}
1529 
/* ndo_set_mac_address: validate and store the new address, then push it
 * into the Rx MAC registers and the unicast packet filter.
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (err)
		return err;

	ave_macaddr_init(ndev);

	return 0;
}
1541 
/* net_device operations; ave_init/ave_uninit manage clocks, resets and
 * the PHY across register_netdev()/unregister_netdev()
 */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init		= ave_init,
	.ndo_uninit		= ave_uninit,
	.ndo_open		= ave_open,
	.ndo_stop		= ave_stop,
	.ndo_start_xmit		= ave_start_xmit,
	.ndo_do_ioctl		= ave_ioctl,
	.ndo_set_rx_mode	= ave_set_rx_mode,
	.ndo_get_stats64	= ave_get_stats64,
	.ndo_set_mac_address	= ave_set_mac_address,
};
1553 
/* probe: map resources, parse DT, set up descriptor geometry, clocks,
 * resets, syscon pinmode data, the MDIO bus and NAPI, then register the
 * net_device.  The hardware is not powered here; that happens in
 * ndo_init when the device is registered.
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	struct of_phandle_args args;
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	struct resource	*res;
	const void *mac_addr;
	void __iomem *base;
	const char *name;
	int i, irq, ret;
	u64 dma_mask;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	phy_mode = of_get_phy_mode(np);
	if ((int)phy_mode < 0) {
		dev_err(dev, "phy-mode not found\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "IRQ not found\n");
		return irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = alloc_etherdev(sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);

	/* if the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor layout and DMA mask depend on the SoC (32/64 bit) */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr  = AVE_TXDM_64;
		priv->rx.daddr  = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr  = AVE_TXDM_32;
		priv->rx.daddr  = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		goto out_free_netdev;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	/* the per-SoC clock name list is terminated by a NULL entry */
	for (i = 0; i < AVE_MAX_CLKS; i++) {
		name = priv->data->clock_names[i];
		if (!name)
			break;
		priv->clk[i] = devm_clk_get(dev, name);
		if (IS_ERR(priv->clk[i])) {
			ret = PTR_ERR(priv->clk[i]);
			goto out_free_netdev;
		}
		priv->nclks++;
	}

	/* the per-SoC reset name list is terminated by a NULL entry */
	for (i = 0; i < AVE_MAX_RSTS; i++) {
		name = priv->data->reset_names[i];
		if (!name)
			break;
		priv->rst[i] = devm_reset_control_get_shared(dev, name);
		if (IS_ERR(priv->rst[i])) {
			ret = PTR_ERR(priv->rst[i]);
			goto out_free_netdev;
		}
		priv->nrsts++;
	}

	/* resolve the syscon regmap and the per-instance pinmode bits;
	 * they are written to SG_ETPINMODE later, in ndo_init
	 */
	ret = of_parse_phandle_with_fixed_args(np,
					       "socionext,syscon-phy-mode",
					       1, 0, &args);
	if (ret) {
		netdev_err(ndev, "can't get syscon-phy-mode property\n");
		goto out_free_netdev;
	}
	priv->regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(priv->regmap)) {
		netdev_err(ndev, "can't map syscon-phy-mode\n");
		ret = PTR_ERR(priv->regmap);
		goto out_free_netdev;
	}
	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
	if (ret) {
		netdev_err(ndev, "invalid phy-mode setting\n");
		goto out_free_netdev;
	}

	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio) {
		ret = -ENOMEM;
		goto out_free_netdev;
	}
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
			  priv->tx.ndesc);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}
1738 
/* remove: mirror of ave_probe(); unregister_netdev() triggers
 * ndo_stop/ndo_uninit, which release the PHY, MDIO bus, resets and
 * clocks before the net_device is freed.
 */
static int ave_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ave_private *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
	free_netdev(ndev);

	return 0;
}
1751 
/* Compute the SG_ETPINMODE mask/value pair for Pro4 (and PXs2).
 * Returns -EINVAL for an unexpected instance argument or phy mode.
 */
static int ave_pro4_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	/* this SoC has only one AVE instance */
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	if (phy_mode == PHY_INTERFACE_MODE_RMII)
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
	else if (phy_mode == PHY_INTERFACE_MODE_MII ||
		 phy_mode == PHY_INTERFACE_MODE_RGMII)
		priv->pinmode_val = 0;
	else
		return -EINVAL;

	return 0;
}
1774 
/* Compute the SG_ETPINMODE mask/value pair for LD11.
 * Returns -EINVAL for an unexpected instance argument or phy mode.
 */
static int ave_ld11_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	/* this SoC has only one AVE instance */
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);

	if (phy_mode == PHY_INTERFACE_MODE_INTERNAL)
		priv->pinmode_val = 0;
	else if (phy_mode == PHY_INTERFACE_MODE_RMII)
		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
	else
		return -EINVAL;

	return 0;
}
1796 
/* Compute the SG_ETPINMODE mask/value pair for LD20.
 * Returns -EINVAL for an unexpected instance argument or phy mode.
 */
static int ave_ld20_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	/* this SoC has only one AVE instance */
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	if (phy_mode == PHY_INTERFACE_MODE_RMII)
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
	else if (phy_mode == PHY_INTERFACE_MODE_RGMII)
		priv->pinmode_val = 0;
	else
		return -EINVAL;

	return 0;
}
1818 
/* Compute the SG_ETPINMODE mask/value pair for PXs3, which has two AVE
 * instances selected by @arg (0 or 1).
 * Returns -EINVAL for an unexpected instance argument or phy mode.
 */
static int ave_pxs3_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 1)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);

	if (phy_mode == PHY_INTERFACE_MODE_RMII)
		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
	else if (phy_mode == PHY_INTERFACE_MODE_RGMII)
		priv->pinmode_val = 0;
	else
		return -EINVAL;

	return 0;
}
1840 
/* Per-SoC configuration: clock/reset names consumed by ave_probe(),
 * 32- vs 64-bit descriptor format, and the pinmode helper.
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	/* PXs2 shares the Pro4 pinmode layout */
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

static const struct ave_soc_data ave_ld20_data = {
	/* LD20 is the only variant using 64-bit descriptors */
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};
1895 
/* OF match table: the per-SoC data selects clocks, resets, descriptor
 * width and the pinmode programming callback
 */
static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
1920 
/* platform driver glue; probed from DT via of_ave_match */
static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");
1933