// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers
 * copyrights here, so I can't add them here.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/err.h>

/* Ethernet frame engine register */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC Address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch register */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

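/*
 * PDMA TX descriptor: txd1 holds the physical address of the packet to
 * send, txd2 carries the length and the LS0/DONE flags, txd4 selects the
 * destination port (TX_DMA_PN).
 */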
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

#define CONFIG_MDIO_TIMEOUT	100
#define CONFIG_DMA_STOP_TIMEOUT	100
#define CONFIG_TX_DMA_TIMEOUT	100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD in RXD Ring0 that DMA will use */
	int rx_dma_idx;
	/* Index of the next TXD in TXD Ring0 that the CPU will use */
	int tx_dma_idx;

	struct reset_ctl	rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

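/* Wait for the given PCR1 status bit to reach the requested state */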
static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				CONFIG_MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

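/* Read a PHY register through the switch's indirect MDIO interface (PCR0/PCR1) */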
static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure any previous MDIO operation has completed */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the read data to become ready */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

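/* Write a PHY register through the switch's indirect MDIO interface (PCR0/PCR1) */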
static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
		FIELD_PREP(PCR0_PHY_REG, phy_register) |
		FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
		PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

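/*
 * Program the five embedded 10/100 PHYs: basic setup, EEE disable and the
 * vendor tuning values inherited from the MediaTek reference U-Boot.
 */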
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i,  0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

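/*
 * Basic setup of the embedded switch: flow-control thresholds, VLAN/port
 * configuration (including the optional LAN/WAN split) and EPHY reset.
 */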
static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125Mhz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition, WAN port will be unusable in u-boot network */
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset PHY */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				CONFIG_DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
	u32 val;

	/* Set MAC address. */
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

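/*
 * Queue a packet for transmission: pad short frames, wait for the next TX
 * descriptor to become free, fill it and advance the CPU TX index register.
 */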
static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad message to a minimum length */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if buffer is ready for next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CONFIG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor writes have completed before kicking DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

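/*
 * Return the next received packet from the RX ring, or -EAGAIN if no
 * packet is pending.
 */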
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor writes have completed at this point */
	wmb();

	return length;
}

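/* Hand the current RX descriptor back to the DMA engine and advance the index */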
static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Tell the DMA engine that this RXD may be reused */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD to be received */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

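/*
 * Initialize the TX/RX descriptor rings, program the ring addresses and
 * sizes into the frame engine, start DMA, bring up the PHY (if configured)
 * and drain any packets already queued by the switch.
 */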
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the ring writes have completed at this point */
	wmb();

	/* disable delay interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located. */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the register writes have completed at this point */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received ethernet
	 * packets in some FIFO. Let's read the already queued packets
	 * out by using the receive routine, so that these old messages
	 * are dropped before the new xfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

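/*
 * Probe: map the frame-engine and switch register ranges, allocate the
 * descriptor rings and RX buffers, register the MDIO bus and configure the
 * embedded switch.
 */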
static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Reset controller */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* WAN port will be isolated from LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/* Put rx and tx rings into KSEG1 area (uncached) */
	priv->tx_ring = (struct fe_tx_dma *)
		KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
				   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
		KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
				   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

static const struct eth_ops mt7628_eth_ops = {
	.start		= mt7628_eth_start,
	.send		= mt7628_eth_send,
	.recv		= mt7628_eth_recv,
	.free_pkt	= mt7628_eth_free_pkt,
	.stop		= mt7628_eth_stop,
	.write_hwaddr	= mt7628_eth_write_hwaddr,
};

static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

U_BOOT_DRIVER(mt7628_eth) = {
	.name	= "mt7628_eth",
	.id	= UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe	= mt7628_eth_probe,
	.ops	= &mt7628_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};