// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

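/*
 * MV_PHY_ADR_REQUEST is a pseudo PHY address: when it is passed as both
 * the address and the register offset through the miiphy callbacks
 * below, the driver reads or writes the port's PHY-address register
 * instead of performing a real SMI transaction.  Note that all ports
 * share the SMI register of the first controller (MVGBE0_BASE).
 */
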
#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or -EFAULT on error.
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	u16 data = 0;
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the port's PHY address from the phyadr register */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL or -EFAULT on error.
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif
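
/*
 * Informal usage sketch (not part of this driver): once the callbacks
 * above are registered, a PHY register can be read through the generic
 * miiphy layer, using the pseudo address to discover the port's PHY
 * address first.  MII_BMSR is assumed to come from <linux/mii.h>:
 *
 *	u16 phyadr, bmsr;
 *
 *	miiphy_read(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
 *		    &phyadr);
 *	miiphy_read(dev->name, phyadr, MII_BMSR, &bmsr);
 */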

/* Stop the given queues and wait for them to drain */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

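	/*
	 * In the RQC/TQC registers the low byte holds the per-queue
	 * enable bits; writing those same bits shifted into the high
	 * byte issues a disable command for the matching queues.
	 */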
	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

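	/*
	 * The size register holds (size / 64KiB) - 1 in bits [31:16],
	 * so windows are sized in 64KiB granules.
	 */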
	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

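	/*
	 * BARE is a window-disable register: a set bit disables the
	 * corresponding window, so enabling a window clears its bit.
	 */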
	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set an entry in the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.  It adds/removes MAC addresses from the port unicast
 * address table.
 *
 * @uc_nibble	Unicast MAC address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if the operation succeeded, 0 if the option parameter is
 * invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

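	/*
	 * Each 32-bit DFUT register packs four one-byte entries: bit 0
	 * of an entry accepts the frame, bits 3:1 select the receive
	 * queue it is delivered to.
	 */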
	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out the Rx descriptor ring and buffers.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) &&  \
	!defined(CONFIG_PHYLIB) &&			 \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this
	 * disables the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Clear the port reset bit (port not held in reset) */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

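	/*
	 * The DMA engine expects an 8-byte-aligned Tx buffer, so frames
	 * at misaligned addresses are bounced through the aligned
	 * scratch buffer allocated at init time.
	 */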
	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or we time out */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If the packet arrived without both the first and last
	 * descriptor bits set, or with the error summary bit set,
	 * it must be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		/* let the upper layer handle the packet */
		net_process_received_packet((p_rxdesc_curr->buf_ptr +
					     RX_BUF_OFFSET),
					    (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET));
	}
	/*
	 * free this descriptor and advance to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	strcpy(bus->name, dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set phy address of the port */
	smi_reg_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip ports that are configured as unused */
		if (used_ports[devnum] == 0)
			continue;

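		/*
		 * The allocations below unwind through the error labels
		 * embedded in the final failure branch.
		 */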
		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();
		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}