/*
 * gPXE driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Derived from Linux skge driver (v1.13), which was
 * based on earlier sk98lin, e100 and FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features of the
 * original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * Modified for gPXE, July 2008 by Michael Decker <mrd999@gmail.com>
 * Tested and Modified in December 2009 by
 *    Thomas Miletich <thomas.miletich@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

FILE_LICENCE ( GPL2_ONLY );

#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <gpxe/netdevice.h>
#include <gpxe/ethernet.h>
#include <gpxe/if_ether.h>
#include <gpxe/iobuf.h>
#include <gpxe/malloc.h>
#include <gpxe/pci.h>

#include "skge.h"

static struct pci_device_id skge_id_table[] = {
	PCI_ROM(0x10b7, 0x1700,     "3C940",     "3COM 3C940", 0),
	PCI_ROM(0x10b7, 0x80eb,     "3C940B",    "3COM 3C940", 0),
	PCI_ROM(0x1148, 0x4300,     "GE",        "Syskonnect GE", 0),
	PCI_ROM(0x1148, 0x4320,     "YU",        "Syskonnect YU", 0),
	PCI_ROM(0x1186, 0x4C00,     "DGE510T",   "DLink DGE-510T", 0),
	PCI_ROM(0x1186, 0x4b01,     "DGE530T",   "DLink DGE-530T", 0),
	PCI_ROM(0x11ab, 0x4320,     "id4320",    "Marvell id4320", 0),
	PCI_ROM(0x11ab, 0x5005,     "id5005",    "Marvell id5005", 0), /* Belkin */
	PCI_ROM(0x1371, 0x434e,     "Gigacard",  "CNET Gigacard", 0),
	PCI_ROM(0x1737, 0x1064,     "EG1064",    "Linksys EG1064", 0),
	PCI_ROM(0x1737, 0xffff,     "id_any",    "Linksys [any]", 0)
};

static int skge_up(struct net_device *dev);
static void skge_down(struct net_device *dev);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);

static void skge_phyirq(struct skge_hw *hw);
static void skge_poll(struct net_device *dev);
static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob);
static void skge_net_irq ( struct net_device *dev, int enable );

static void skge_rx_refill(struct net_device *dev);

static struct net_device_operations skge_operations = {
	.open     = skge_up,
	.close    = skge_down,
	.transmit = skge_xmit_frame,
	.poll     = skge_poll,
	.irq      = skge_net_irq
};

/* Avoid conditionals by using arrays */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
	u32 supported;

	if (hw->copper) {
		supported = SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full
			| SUPPORTED_1000baseT_Half
			| SUPPORTED_1000baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP;

		if (hw->chip_id == CHIP_ID_GENESIS)
			supported &= ~(SUPPORTED_10baseT_Half
					     | SUPPORTED_10baseT_Full
					     | SUPPORTED_100baseT_Half
					     | SUPPORTED_100baseT_Full);

		else if (hw->chip_id == CHIP_ID_YUKON)
			supported &= ~SUPPORTED_1000baseT_Half;
	} else
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
			| SUPPORTED_FIBRE | SUPPORTED_Autoneg;

	return supported;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}
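
/*
 * For example, the Yukon core clock is 78125 kHz (78.125 MHz), so
 * skge_usecs2clk(hw, 1000) yields 78125 clock ticks; on Genesis
 * (53.125 MHz) the same 1 ms interval is 53125 ticks.
 */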

enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id == CHIP_ID_GENESIS) {
		switch (mode) {
		case LED_MODE_OFF:
			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
			else {
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
			}
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
			break;

		case LED_MODE_ON:
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

			break;

		case LED_MODE_TST:
			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
			else {
				skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
			}

		}
	} else {
		switch (mode) {
		case LED_MODE_OFF:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
				     PHY_M_LED_MO_10(MO_LED_OFF)   |
				     PHY_M_LED_MO_100(MO_LED_OFF)  |
				     PHY_M_LED_MO_1000(MO_LED_OFF) |
				     PHY_M_LED_MO_RX(MO_LED_OFF));
			break;
		case LED_MODE_ON:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
				     PHY_M_LED_PULS_DUR(PULS_170MS) |
				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
				     PHY_M_LEDC_TX_CTRL |
				     PHY_M_LEDC_DP_CTRL);

			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_RX(MO_LED_OFF) |
				     (skge->speed == SPEED_100 ?
				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
			break;
		case LED_MODE_TST:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_ON)  |
				     PHY_M_LED_MO_10(MO_LED_ON)   |
				     PHY_M_LED_MO_100(MO_LED_ON)  |
				     PHY_M_LED_MO_1000(MO_LED_ON) |
				     PHY_M_LED_MO_RX(MO_LED_ON));
		}
	}
}

/*
 * I've left in these EEPROM and VPD functions, as someone may desire to
 * integrate them in the future. -mdeck
 *
 * static int skge_get_eeprom_len(struct net_device *dev)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	u32 reg2;
 *
 * 	pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
 * 	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
 * }
 *
 * static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
 * {
 * 	u32 val;
 *
 * 	pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
 *
 * 	do {
 * 		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
 * 	} while (!(offset & PCI_VPD_ADDR_F));
 *
 * 	pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
 * 	return val;
 * }
 *
 * static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
 * {
 * 	pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
 * 	pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
 * 			      offset | PCI_VPD_ADDR_F);
 *
 * 	do {
 * 		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
 * 	} while (offset & PCI_VPD_ADDR_F);
 * }
 *
 * static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 * 			   u8 *data)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	struct pci_dev *pdev = skge->hw->pdev;
 * 	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
 * 	int length = eeprom->len;
 * 	u16 offset = eeprom->offset;
 *
 * 	if (!cap)
 * 		return -EINVAL;
 *
 * 	eeprom->magic = SKGE_EEPROM_MAGIC;
 *
 * 	while (length > 0) {
 * 		u32 val = skge_vpd_read(pdev, cap, offset);
 * 		int n = min_t(int, length, sizeof(val));
 *
 * 		memcpy(data, &val, n);
 * 		length -= n;
 * 		data += n;
 * 		offset += n;
 * 	}
 * 	return 0;
 * }
 *
 * static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 * 			   u8 *data)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	struct pci_dev *pdev = skge->hw->pdev;
 * 	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
 * 	int length = eeprom->len;
 * 	u16 offset = eeprom->offset;
 *
 * 	if (!cap)
 * 		return -EINVAL;
 *
 * 	if (eeprom->magic != SKGE_EEPROM_MAGIC)
 * 		return -EINVAL;
 *
 * 	while (length > 0) {
 * 		u32 val;
 * 		int n = min_t(int, length, sizeof(val));
 *
 * 		if (n < sizeof(val))
 * 			val = skge_vpd_read(pdev, cap, offset);
 * 		memcpy(&val, data, n);
 *
 * 		skge_vpd_write(pdev, cap, offset, val);
 *
 * 		length -= n;
 * 		data += n;
 * 		offset += n;
 * 	}
 * 	return 0;
 * }
 */

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base,
                           size_t num)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	unsigned int i;

	ring->start = zalloc(num*sizeof(*e));
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < num; i++, e++, d++) {
		e->desc = d;
		if (i == num - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}
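
/*
 * The resulting ring is circular in both views: software walks e->next
 * from element to element, while the hardware follows each descriptor's
 * next_offset (a bus address computed from 'base') and wraps from the
 * last descriptor back to the first.
 */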

/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge __unused,
			  struct skge_element *e,
			  struct io_buffer *iob, unsigned int bufsize)
{
	struct skge_rx_desc *rd = e->desc;
	u64 map;

	map = ( iob != NULL ) ? virt_to_bus(iob->data) : 0;

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->iob = iob;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;
	rd->csum1 = 0;
	rd->csum2 = 0;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
}
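
/*
 * Note the ordering above: all other descriptor fields are written
 * first, then wmb() ensures those writes are visible before the control
 * word passes ownership of the descriptor (BMU_OWN) to the hardware.
 */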

/* Resume receiving using existing I/O buffer.
 * Note: DMA address is not changed by chip.
 * 	 MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
	struct skge_rx_desc *rd = e->desc;

	rd->csum2 = 0;
	rd->csum2_start = ETH_HLEN;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}


/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;
		if (e->iob) {
			free_iob(e->iob);
			e->iob = NULL;
		}
	} while ((e = e->next) != ring->start);
}
395 
skge_link_up(struct skge_port * skge)396 static void skge_link_up(struct skge_port *skge)
397 {
398 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
399 		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
400 
401 	netdev_link_up(skge->netdev);
402 
403 	DBG2(PFX "%s: Link is up at %d Mbps, %s duplex\n",
404 	     skge->netdev->name, skge->speed,
405 	     skge->duplex == DUPLEX_FULL ? "full" : "half");
406 }
407 
skge_link_down(struct skge_port * skge)408 static void skge_link_down(struct skge_port *skge)
409 {
410 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
411 	netdev_link_down(skge->netdev);
412 
413 	DBG2(PFX "%s: Link is down.\n", skge->netdev->name);
414 }
415 
416 
xm_link_down(struct skge_hw * hw,int port)417 static void xm_link_down(struct skge_hw *hw, int port)
418 {
419 	struct net_device *dev = hw->dev[port];
420 	struct skge_port *skge = netdev_priv(dev);
421 
422 	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
423 
424 	if (netdev_link_ok(dev))
425 		skge_link_down(skge);
426 }

static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	if (hw->phy_type == SK_PHY_XMAC)
		goto ready;

	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
		udelay(1);
	}

	return -ETIMEDOUT;
 ready:
	*val = xm_read16(hw, port, XM_PHY_DATA);

	return 0;
}
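
/*
 * For the XMAC's internal PHY the first XM_PHY_DATA read above is used
 * directly; for external PHYs the code polls XM_MMU_PHY_RDY and then
 * re-reads XM_PHY_DATA to pick up the value from the completed MDIO
 * transfer.
 */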

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;
	if (__xm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timed out\n",
		       hw->dev[port]->name);
	return v;
}

static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	return -EIO;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
	const u8 zero[8]  = { 0 };
	u32 reg;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	if (hw->phy_type == SK_PHY_BCOM)
		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	xm_outhash(hw, port, XM_HSM, zero);

	/* Flush TX and RX fifo */
	reg = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}


/* Convert mode to MII values */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] =	0,
	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
	[FLOW_MODE_NONE]	= PHY_X_P_NO_PAUSE,
	[FLOW_MODE_LOC_SEND]	= PHY_X_P_ASYM_MD,
	[FLOW_MODE_SYMMETRIC]	= PHY_X_P_SYM_MD,
	[FLOW_MODE_SYM_OR_REM]	= PHY_X_P_BOTH_MD,
};


/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, aux;

		if (!(status & PHY_ST_AN_OVER))
			return;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",
			       dev->name);
			return;
		}

		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

		/* Check Duplex mismatch */
		switch (aux & PHY_B_AS_AN_RES_MSK) {
		case PHY_B_RES_1000FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_B_RES_1000HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			DBG(PFX "%s: duplex mismatch\n",
			       dev->name);
			return;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (aux & PHY_B_AS_PAUSE_MSK) {
		case PHY_B_AS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_B_AS_PRR:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_B_AS_PRT:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}
		skge->speed = SPEED_1000;
	}

	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
}
/* Broadcom 5400 only supports gigabit! SysKonnect did not fit an
 * additional PHY for 10 or 100 Mbit operation.
 */
static void bcom_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |= XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD, r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);
		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}

static void xm_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl = 0;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			ctrl |= PHY_X_AN_HD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			ctrl |= PHY_X_AN_FD;

		ctrl |= fiber_pause_map[skge->flow_control];

		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

		/* Restart Auto-negotiation */
		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* Set DuplexMode in Config register */
		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;
		/*
		 * Do NOT enable Auto-negotiation here. This would hold
		 * the link down because no IDLEs are transmitted
		 */
	}

	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

	/* Poll PHY for status changes */
	skge->use_xm_link_timer = 1;
}

static int xm_check_link(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_XMAC_STAT);
	status = xm_phy_read(hw, port, PHY_XMAC_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return 0;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, res;

		if (!(status & PHY_ST_AN_OVER))
			return 0;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",
			       dev->name);
			return 0;
		}

		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

		/* Check Duplex mismatch */
		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
		case PHY_X_RS_FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_X_RS_HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			DBG(PFX "%s: duplex mismatch\n",
			       dev->name);
			return 0;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
		    (lpa & PHY_X_P_SYM_MD))
			skge->flow_status = FLOW_STAT_SYMMETRIC;
		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
			/* Enable PAUSE receive, disable PAUSE transmit */
			skge->flow_status = FLOW_STAT_REM_SEND;
		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
			/* Disable PAUSE receive, enable PAUSE transmit */
			skge->flow_status = FLOW_STAT_LOC_SEND;
		else
			skge->flow_status = FLOW_STAT_NONE;

		skge->speed = SPEED_1000;
	}

	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
	return 1;
}

/* Poll to check for link coming up.
 *
 * Since the internal PHY is wired to a level triggered pin, we can't
 * get an interrupt when carrier is detected and need to poll for the
 * link coming up.
 */
static void xm_link_timer(struct skge_port *skge)
{
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	/*
	 * Verify the link by checking the GPIO register three times.
	 * This pin has the signal from the link_sync pin connected to it.
	 */
	for (i = 0; i < 3; i++) {
		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
			return;
	}

	/* Re-enable interrupt to detect link down */
	if (xm_check_link(dev)) {
		u16 msk = xm_read16(hw, port, XM_IMSK);
		msk &= ~XM_IS_INP_ASS;
		xm_write16(hw, port, XM_IMSK, msk);
		xm_read16(hw, port, XM_ISRC);
	}
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int i;
	u32 r;
	const u8 zero[6]  = { 0 };

	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	DBG(PFX "%s: genesis reset failed\n", dev->name);

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take external Phy out of reset */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);

		/* Enable GMII interface */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
	}


	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		xm_phy_init(skge);
		break;
	case SK_PHY_BCOM:
		bcom_phy_init(skge);
		bcom_check_link(hw, port);
	}

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->ll_addr);

	/* We don't use match addresses, so clear them */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore the absence of a carrier
		 * extension on received frames.
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* enable timeout timers */
	skge_write16(hw, B3_PA_CTRL,
		     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
}

static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned retries = 1000;
	u16 cmd;

	/* Disable Tx and Rx */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	xm_write16(hw, port, XM_MMU_CMD, cmd);

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/* Reset the MAC */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
	do {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
		if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
			break;
	} while (--retries > 0);

	/* For external PHYs there must be special handling */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		if (port == 0) {
			reg |= GP_DIR_0;
			reg &= ~GP_IO_0;
		} else {
			reg |= GP_DIR_2;
			reg &= ~GP_IO_2;
		}
		skge_write32(hw, B2_GP_IO, reg);
		skge_read32(hw, B2_GP_IO);
	}

	xm_write16(hw, port, XM_MMU_CMD,
			xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);
}

static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd, msk;
	u32 mode;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * Enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link goes down.
	 */
	if (skge->flow_status == FLOW_STAT_NONE ||
	    skge->flow_status == FLOW_STAT_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
	    skge->flow_status == FLOW_STAT_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * an internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * Disabling pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link goes down.
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	/* Turn on detection of Tx underrun */
	msk = xm_read16(hw, port, XM_IMSK);
	msk &= ~XM_IS_TXF_UR;
	xm_write16(hw, port, XM_IMSK, msk);

	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	if (hw->phy_type == SK_PHY_BCOM) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
			     & ~PHY_B_AC_DIS_PM);
		xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
	}

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}


static inline void bcom_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 isrc;

	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	DBGIO(PFX "%s: phy interrupt status 0x%x\n",
	     skge->netdev->name, isrc);

	if (isrc & PHY_B_IS_PSE)
		DBG(PFX "%s: uncorrectable pair swap error\n",
		    hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl & ~PHY_CT_LOOP);
	}

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);
}

static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			return 0;
	}

	DBG(PFX "%s: phy write timeout port %x reg %x val %x\n",
	    hw->dev[port]->name,
	    port, reg, val);
	return -EIO;
}

static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr)
			 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	return -ETIMEDOUT;
 ready:
	*val = gma_read16(hw, port, GM_SMI_DATA);
	return 0;
}

static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;
	if (__gm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timeout port %x reg %x val %x\n",
	       hw->dev[port]->name,
	       port, reg, v);
	return v;
}

/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			  PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			/* Set Flow-control capabilities */
			adv |= phy_pause_map[skge->flow_control];
		} else {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;

			adv |= fiber_pause_map[skge->flow_control];
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}

static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);	/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
			 gma_read16(hw, port, GM_RX_CTRL)
			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}

/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
static int is_yukon_lite_a0(struct skge_hw *hw)
{
	u32 reg;
	int ret;

	if (hw->chip_id != CHIP_ID_YUKON)
		return 0;

	reg = skge_read32(hw, B2_FAR);
	skge_write8(hw, B2_FAR + 3, 0xff);
	ret = (skge_read8(hw, B2_FAR + 3) != 0);
	skge_write32(hw, B2_FAR, reg);
	return ret;
}
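
/*
 * The probe above saves B2_FAR, writes 0xff into its top byte and
 * treats a non-zero read-back as identifying a Yukon-Lite rev. A0 part
 * that reports the plain Yukon chip_id, then restores the register.
 */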

static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->ll_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
				 gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_SYMMETRIC:
	case FLOW_MODE_SYM_OR_REM:
		/* enable Tx & Rx flow-control */
		break;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	yukon_init(hw, port);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* configure the Serial Mode Register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
		| GM_SMOD_VLAN_ENA
		| IPG_DATA_VAL(IPG_DATA_DEF);

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}

/* Go into power down mode */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}

static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}

static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
{
	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}

static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_status == FLOW_STAT_REM_SEND) {
		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
		ctrl |= PHY_M_AN_ASP;
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
	}

	skge_link_down(skge);

	yukon_init(hw, port);
}

static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
	     skge->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}

		if (skge->flow_status == FLOW_STAT_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
 failed:
	DBG(PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}

static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queues */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queues because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
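
/*
 * Note that RB_START/RB_END and the thresholds are programmed in 8-byte
 * units (the byte start address and length are divided by 8 above), and
 * the receive queues get an upper threshold at 2/3 and a lower threshold
 * at 1/3 of the buffer.
 */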

/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32-bit/33 MHz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}

void skge_free(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	free(skge->rx_ring.start);
	skge->rx_ring.start = NULL;

	free(skge->tx_ring.start);
	skge->tx_ring.start = NULL;

	free_dma(skge->mem, RING_SIZE);
	skge->mem = NULL;
	skge->dma = 0;
}
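
/*
 * skge->mem holds both descriptor rings back to back: NUM_RX_DESC
 * receive descriptors at offset 0, followed by NUM_TX_DESC transmit
 * descriptors at offset RX_RING_SIZE. skge_up() below allocates this
 * region and wires up both rings.
 */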
1708 
skge_up(struct net_device * dev)1709 static int skge_up(struct net_device *dev)
1710 {
1711 	struct skge_port *skge = netdev_priv(dev);
1712 	struct skge_hw *hw = skge->hw;
1713 	int port = skge->port;
1714 	u32 chunk, ram_addr;
1715 	int err;
1716 
1717 	DBG2(PFX "%s: enabling interface\n", dev->name);
1718 
1719 	skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
1720 	skge->dma = virt_to_bus(skge->mem);
1721 	if (!skge->mem)
1722 		return -ENOMEM;
1723 	memset(skge->mem, 0, RING_SIZE);
1724 
1725 	assert(!(skge->dma & 7));
1726 
1727 	/* FIXME: find out whether 64 bit gPXE will be loaded > 4GB */
1728 	if ((u64)skge->dma >> 32 != ((u64) skge->dma + RING_SIZE) >> 32) {
1729 		DBG(PFX "pci_alloc_consistent region crosses 4G boundary\n");
1730 		err = -EINVAL;
1731 		goto err;
1732 	}
1733 
1734 	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma, NUM_RX_DESC);
1735 	if (err)
1736 		goto err;
1737 
1738 	/* This call relies on e->iob and the descriptor control word being 0.
1739 	 * That is assured by calling memset() on skge->mem and using zalloc()
1740 	 * for the skge_element structures.
1741 	 */
1742 	skge_rx_refill(dev);
1743 
1744 	err = skge_ring_alloc(&skge->tx_ring, skge->mem + RX_RING_SIZE,
1745 			      skge->dma + RX_RING_SIZE, NUM_TX_DESC);
1746 	if (err)
1747 		goto err;
1748 
1749 	/* Initialize MAC */
1750 	if (hw->chip_id == CHIP_ID_GENESIS)
1751 		genesis_mac_init(hw, port);
1752 	else
1753 		yukon_mac_init(hw, port);
1754 
1755 	/* Configure RAM buffers, split equally between the ports and Tx/Rx */
1756 	chunk = (hw->ram_size  - hw->ram_offset) / (hw->ports * 2);
1757 	ram_addr = hw->ram_offset + 2 * chunk * port;
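	/* Example: a single-port board with 512 KB of RAM and no offset gets
	 * chunk = 256 KB; Rx uses [0, 256K) and Tx uses [256K, 512K).
	 */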
1758 
1759 	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
1760 	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
1761 
1762 	assert(skge->tx_ring.to_use == skge->tx_ring.to_clean);
1763 	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
1764 	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
1765 
1766 	/* Start receiver BMU */
1767 	wmb();
1768 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
1769 	skge_led(skge, LED_MODE_ON);
1770 
1771 	hw->intr_mask |= portmask[port];
1772 	skge_write32(hw, B0_IMSK, hw->intr_mask);
1773 
1774 	return 0;
1775 
1776  err:
1777 	skge_rx_clean(skge);
1778 	skge_free(dev);
1779 
1780 	return err;
1781 }
1782 
1783 /* stop receiver */
1784 static void skge_rx_stop(struct skge_hw *hw, int port)
1785 {
1786 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
1787 	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
1788 		     RB_RST_SET|RB_DIS_OP_MD);
1789 	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
1790 }
1791 
1792 static void skge_down(struct net_device *dev)
1793 {
1794 	struct skge_port *skge = netdev_priv(dev);
1795 	struct skge_hw *hw = skge->hw;
1796 	int port = skge->port;
1797 
1798 	if (skge->mem == NULL)
1799 		return;
1800 
1801 	DBG2(PFX "%s: disabling interface\n", dev->name);
1802 
1803 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
1804 		skge->use_xm_link_timer = 0;
1805 
1806 	netdev_link_down(dev);
1807 
1808 	hw->intr_mask &= ~portmask[port];
1809 	skge_write32(hw, B0_IMSK, hw->intr_mask);
1810 
1811 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
1812 	if (hw->chip_id == CHIP_ID_GENESIS)
1813 		genesis_stop(skge);
1814 	else
1815 		yukon_stop(skge);
1816 
1817 	/* Stop transmitter */
1818 	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
1819 	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1820 		     RB_RST_SET|RB_DIS_OP_MD);
1821 
1823 	/* Disable Force Sync bit and Enable Alloc bit */
1824 	skge_write8(hw, SK_REG(port, TXA_CTRL),
1825 		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1826 
1827 	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
1828 	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1829 	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1830 
1831 	/* Reset PCI FIFO */
1832 	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
1833 	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1834 
1835 	/* Reset the RAM Buffer async Tx queue */
1836 	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
1837 
1838 	skge_rx_stop(hw, port);
1839 
1840 	if (hw->chip_id == CHIP_ID_GENESIS) {
1841 		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
1842 		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
1843 	} else {
1844 		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1845 		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1846 	}
1847 
1848 	skge_led(skge, LED_MODE_OFF);
1849 
1850 	skge_tx_clean(dev);
1851 
1852 	skge_rx_clean(skge);
1853 
1854 	skge_free(dev);
1856 }
1857 
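/* Free Tx slots: descriptors from to_clean up to to_use are in flight.
 * When to_clean <= to_use the difference is negative or zero, so
 * NUM_TX_DESC is added to wrap it; one slot is always kept unused so that
 * a full ring can be told apart from an empty one.
 */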
1858 static inline int skge_tx_avail(const struct skge_ring *ring)
1859 {
1860 	mb();
1861 	return ((ring->to_clean > ring->to_use) ? 0 : NUM_TX_DESC)
1862 		+ (ring->to_clean - ring->to_use) - 1;
1863 }
1864 
1865 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
1866 {
1867 	struct skge_port *skge = netdev_priv(dev);
1868 	struct skge_hw *hw = skge->hw;
1869 	struct skge_element *e;
1870 	struct skge_tx_desc *td;
1871 	u32 control, len;
1872 	u64 map;
1873 
1874 	if (skge_tx_avail(&skge->tx_ring) < 1)
1875 		return -EBUSY;
1876 
1877 	e = skge->tx_ring.to_use;
1878 	td = e->desc;
1879 	assert(!(td->control & BMU_OWN));
1880 	e->iob = iob;
1881 	len = iob_len(iob);
1882 	map = virt_to_bus(iob->data);
1883 
1884 	td->dma_lo = map;
1885 	td->dma_hi = map >> 32;
1886 
1887 	control = BMU_CHECK | BMU_EOF | BMU_IRQ_EOF;
1890 	/* Make sure all the descriptor fields are written before setting OWN */
1891 	wmb();
1892 	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
1893 	wmb();
1894 
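	/* The descriptor is now owned by the hardware; kick the transmit
	 * BMU so it starts fetching and sending the frame.
	 */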
1895 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
1896 
1897 	DBGIO(PFX "%s: tx queued, slot %td, len %d\n",
1898 	     dev->name, e - skge->tx_ring.start, (unsigned int)len);
1899 
1900 	skge->tx_ring.to_use = e->next;
1901 	wmb();
1902 
1903 	if (skge_tx_avail(&skge->tx_ring) <= 1) {
1904 		DBG(PFX "%s: transmit queue full\n", dev->name);
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 /* Free all buffers in transmit ring */
1911 static void skge_tx_clean(struct net_device *dev)
1912 {
1913 	struct skge_port *skge = netdev_priv(dev);
1914 	struct skge_element *e;
1915 
1916 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
1917 		struct skge_tx_desc *td = e->desc;
1918 		td->control = 0;
1919 	}
1920 
1921 	skge->tx_ring.to_clean = e;
1922 }
1923 
1924 static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
1925 
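/* The hardware receive status word carries the frame length in its upper
 * bits; the shift differs between the XMAC (Genesis) and GMAC (Yukon).
 */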
1926 static inline u16 phy_length(const struct skge_hw *hw, u32 status)
1927 {
1928 	if (hw->chip_id == CHIP_ID_GENESIS)
1929 		return status >> XMR_FS_LEN_SHIFT;
1930 	else
1931 		return status >> GMR_FS_LEN_SHIFT;
1932 }
1933 
1934 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
1935 {
1936 	if (hw->chip_id == CHIP_ID_GENESIS)
1937 		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
1938 	else
1939 		return (status & GMR_FS_ANY_ERR) ||
1940 			(status & GMR_FS_RX_OK) == 0;
1941 }
1942 
1943 /* Free all buffers in Tx ring which are no longer owned by device */
1944 static void skge_tx_done(struct net_device *dev)
1945 {
1946 	struct skge_port *skge = netdev_priv(dev);
1947 	struct skge_ring *ring = &skge->tx_ring;
1948 	struct skge_element *e;
1949 
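	/* Acknowledge the Tx end-of-frame interrupt before reaping
	 * completed descriptors.
	 */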
1950 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
1951 
1952 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
1953 		u32 control = ((const struct skge_tx_desc *) e->desc)->control;
1954 
1955 		if (control & BMU_OWN)
1956 			break;
1957 
1958 		netdev_tx_complete(dev, e->iob);
1959 	}
1960 	skge->tx_ring.to_clean = e;
1961 
1962 	/* Can run lockless until we need to synchronize to restart queue. */
1963 	mb();
1964 }
1965 
1966 static void skge_rx_refill(struct net_device *dev)
1967 {
1968 	struct skge_port *skge = netdev_priv(dev);
1969 	struct skge_ring *ring = &skge->rx_ring;
1970 	struct skge_element *e;
1971 	struct io_buffer *iob;
1972 	struct skge_rx_desc *rd;
1973 	u32 control;
1974 	int i;
1975 
1976 	for (i = 0; i < NUM_RX_DESC; i++) {
1977 		e = ring->to_clean;
1978 		rd = e->desc;
1979 		iob = e->iob;
1980 		control = rd->control;
1981 
1982 		/* nothing to do here */
1983 		if (iob || (control & BMU_OWN))
1984 			continue;
1985 
1986 		DBG2("refilling rx desc %td: ", (ring->to_clean - ring->start));
1987 
1988 		iob = alloc_iob(RX_BUF_SIZE);
1989 		if (iob) {
1990 			skge_rx_setup(skge, e, iob, RX_BUF_SIZE);
1991 		} else {
1992 			DBG("descr %td: alloc_iob() failed\n",
1993 			     (ring->to_clean - ring->start));
1994 			/* We pass the descriptor to the NIC even if the
1995 			 * allocation failed. The card will stop as soon as it
1996 			 * encounters a descriptor with the OWN bit set to 0,
1997 			 * thus never getting to the next descriptor that might
1998 			 * contain a valid io_buffer. This would effectively
1999 			 * stall the receive.
2000 			 */
2001 			skge_rx_setup(skge, e, NULL, 0);
2002 		}
2003 
2004 		ring->to_clean = e->next;
2005 	}
2006 }
2007 
2008 static void skge_rx_done(struct net_device *dev)
2009 {
2010 	struct skge_port *skge = netdev_priv(dev);
2011 	struct skge_ring *ring = &skge->rx_ring;
2012 	struct skge_rx_desc *rd;
2013 	struct skge_element *e;
2014 	struct io_buffer *iob;
2015 	u32 control;
2016 	u16 len;
2017 	int i;
2018 
2019 	e = ring->to_clean;
2020 	for (i = 0; i < NUM_RX_DESC; i++) {
2021 		iob = e->iob;
2022 		rd = e->desc;
2023 
2024 		rmb();
2025 		control = rd->control;
2026 
2027 		if (control & BMU_OWN)
2028 			break;
2029 
2030 		if (!iob)
2031 			continue;
2032 
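		/* The low bits of the control word hold the buffer byte
		 * count reported by the BMU.
		 */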
2033 		len = control & BMU_BBC;
2034 
2035 		/* catch RX errors */
2036 		if (bad_phy_status(skge->hw, rd->status) ||
2037 		    phy_length(skge->hw, rd->status) != len) {
2038 			/* report receive errors */
2039 			DBG("rx error\n");
2040 			netdev_rx_err(dev, iob, -EIO);
2041 		} else {
2042 			DBG2("received packet, len %d\n", len);
2043 			iob_put(iob, len);
2044 			netdev_rx(dev, iob);
2045 		}
2046 
2047 		/* io_buffer passed to core, make sure we don't reuse it */
2048 		e->iob = NULL;
2049 
2050 		e = e->next;
2051 	}
2052 	skge_rx_refill(dev);
2053 }
2054 
2055 static void skge_poll(struct net_device *dev)
2056 {
2057 	struct skge_port *skge = netdev_priv(dev);
2058 	struct skge_hw *hw = skge->hw;
2059 	u32 status;
2060 
2061 	/* reading this register ACKs interrupts */
2062 	status = skge_read32(hw, B0_SP_ISRC);
2063 
2064 	/* Link event? */
2065 	if (status & IS_EXT_REG) {
2066 		skge_phyirq(hw);
2067 		if (skge->use_xm_link_timer)
2068 			xm_link_timer(skge);
2069 	}
2070 
2071 	skge_tx_done(dev);
2072 
2073 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2074 
2075 	skge_rx_done(dev);
2076 
2077 	/* restart receiver */
2078 	wmb();
2079 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
2080 
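	/* Read back the interrupt mask to flush posted PCI writes */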
2081 	skge_read32(hw, B0_IMSK);
2082 
2084 }
2085 
2086 static void skge_phyirq(struct skge_hw *hw)
2087 {
2088 	int port;
2089 
2090 	for (port = 0; port < hw->ports; port++) {
2091 		struct net_device *dev = hw->dev[port];
2092 		struct skge_port *skge = netdev_priv(dev);
2093 
2094 		if (hw->chip_id != CHIP_ID_GENESIS)
2095 			yukon_phy_intr(skge);
2096 		else if (hw->phy_type == SK_PHY_BCOM)
2097 			bcom_phy_intr(skge);
2098 	}
2099 
2100 	hw->intr_mask |= IS_EXT_REG;
2101 	skge_write32(hw, B0_IMSK, hw->intr_mask);
2102 	skge_read32(hw, B0_IMSK);
2103 }
2104 
2105 static const struct {
2106 	u8 id;
2107 	const char *name;
2108 } skge_chips[] = {
2109 	{ CHIP_ID_GENESIS,	"Genesis" },
2110 	{ CHIP_ID_YUKON,	"Yukon" },
2111 	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite" },
2112 	{ CHIP_ID_YUKON_LP,	"Yukon-LP" },
2113 };
2114 
2115 static const char *skge_board_name(const struct skge_hw *hw)
2116 {
2117 	unsigned int i;
2118 	static char buf[16];
2119 
2120 	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
2121 		if (skge_chips[i].id == hw->chip_id)
2122 			return skge_chips[i].name;
2123 
2124 	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
2125 	return buf;
2126 }
2127 
2128 
2129 /*
2130  * Setup the board data structure, but don't bring up
2131  * the port(s)
2132  */
2133 static int skge_reset(struct skge_hw *hw)
2134 {
2135 	u32 reg;
2136 	u16 ctst, pci_status;
2137 	u8 t8, mac_cfg, pmd_type;
2138 	int i;
2139 
2140 	ctst = skge_read16(hw, B0_CTST);
2141 
2142 	/* do a SW reset */
2143 	skge_write8(hw, B0_CTST, CS_RST_SET);
2144 	skge_write8(hw, B0_CTST, CS_RST_CLR);
2145 
2146 	/* clear PCI errors, if any */
2147 	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2148 	skge_write8(hw, B2_TST_CTRL2, 0);
2149 
2150 	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2151 	pci_write_config_word(hw->pdev, PCI_STATUS,
2152 			      pci_status | PCI_STATUS_ERROR_BITS);
2153 	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2154 	skge_write8(hw, B0_CTST, CS_MRST_CLR);
2155 
2156 	/* restore CLK_RUN bits (for Yukon-Lite) */
2157 	skge_write16(hw, B0_CTST,
2158 		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2159 
2160 	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2161 	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2162 	pmd_type = skge_read8(hw, B2_PMD_TYP);
2163 	hw->copper = (pmd_type == 'T' || pmd_type == '1');
2164 
2165 	switch (hw->chip_id) {
2166 	case CHIP_ID_GENESIS:
2167 		switch (hw->phy_type) {
2168 		case SK_PHY_XMAC:
2169 			hw->phy_addr = PHY_ADDR_XMAC;
2170 			break;
2171 		case SK_PHY_BCOM:
2172 			hw->phy_addr = PHY_ADDR_BCOM;
2173 			break;
2174 		default:
2175 			DBG(PFX "unsupported phy type 0x%x\n",
2176 			       hw->phy_type);
2177 			return -EOPNOTSUPP;
2178 		}
2179 		break;
2180 
2181 	case CHIP_ID_YUKON:
2182 	case CHIP_ID_YUKON_LITE:
2183 	case CHIP_ID_YUKON_LP:
2184 		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2185 			hw->copper = 1;
2186 
2187 		hw->phy_addr = PHY_ADDR_MARV;
2188 		break;
2189 
2190 	default:
2191 		DBG(PFX "unsupported chip type 0x%x\n",
2192 		       hw->chip_id);
2193 		return -EOPNOTSUPP;
2194 	}
2195 
2196 	mac_cfg = skge_read8(hw, B2_MAC_CFG);
2197 	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2198 	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2199 
2200 	/* read the adapter's RAM size */
2201 	t8 = skge_read8(hw, B2_E_0);
2202 	if (hw->chip_id == CHIP_ID_GENESIS) {
2203 		if (t8 == 3) {
2204 			/* special case: 4 x 64k x 36, offset = 0x80000 */
2205 			hw->ram_size = 0x100000;
2206 			hw->ram_offset = 0x80000;
2207 		} else
2208 			hw->ram_size = t8 * 512;
2209 	} else if (t8 == 0)
2211 		hw->ram_size = 0x20000;
2212 	else
2213 		hw->ram_size = t8 * 4096;
2214 
2215 	hw->intr_mask = IS_HW_ERR;
2216 
2217 	/* Use PHY IRQ for all but fiber-based Genesis boards */
2218 	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
2219 		hw->intr_mask |= IS_EXT_REG;
2220 
2221 	if (hw->chip_id == CHIP_ID_GENESIS)
2222 		genesis_init(hw);
2223 	else {
2224 		/* switch power to VCC (workaround for the VAUX problem) */
2225 		skge_write8(hw, B0_POWER_CTRL,
2226 			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
2227 
2228 		/* avoid boards with stuck Hardware error bits */
2229 		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2230 		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
2231 			DBG(PFX "stuck hardware sensor bit\n");
2232 			hw->intr_mask &= ~IS_HW_ERR;
2233 		}
2234 
2235 		/* Clear PHY COMA */
2236 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2237 		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
2238 		reg &= ~PCI_PHY_COMA;
2239 		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
2240 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2241 
2243 		for (i = 0; i < hw->ports; i++) {
2244 			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2245 			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2246 		}
2247 	}
2248 
2249 	/* turn off hardware timer (unused) */
2250 	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
2251 	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2252 	skge_write8(hw, B0_LED, LED_STAT_ON);
2253 
2254 	/* enable the Tx Arbiters */
2255 	for (i = 0; i < hw->ports; i++)
2256 		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2257 
2258 	/* Initialize RAM interface */
2259 	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
2260 
2261 	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
2262 	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
2263 	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
2264 	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
2265 	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
2266 	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
2267 	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
2268 	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
2269 	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
2270 	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
2271 	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
2272 	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
2273 
2274 	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
2275 
2276 	/* Set interrupt moderation for Transmit only;
2277 	 * receive is handled by polling, so no Rx moderation is needed.
2278 	 */
2279 	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
2280 	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
2281 	skge_write32(hw, B2_IRQM_CTRL, TIM_START);
2282 
2283 	skge_write32(hw, B0_IMSK, hw->intr_mask);
2284 
2285 	for (i = 0; i < hw->ports; i++) {
2286 		if (hw->chip_id == CHIP_ID_GENESIS)
2287 			genesis_reset(hw, i);
2288 		else
2289 			yukon_reset(hw, i);
2290 	}
2291 
2292 	return 0;
2293 }
2294 
2295 /* Initialize network device */
2296 static struct net_device *skge_devinit(struct skge_hw *hw, int port,
2297 				       int highmem __unused)
2298 {
2299 	struct skge_port *skge;
2300 	struct net_device *dev = alloc_etherdev(sizeof(*skge));
2301 
2302 	if (!dev) {
2303 		DBG(PFX "etherdev alloc failed\n");
2304 		return NULL;
2305 	}
2306 
2307 	dev->dev = &hw->pdev->dev;
2308 
2309 	skge = netdev_priv(dev);
2310 	skge->netdev = dev;
2311 	skge->hw = hw;
2312 
2313 	/* Auto speed and flow control */
2314 	skge->autoneg = AUTONEG_ENABLE;
2315 	skge->flow_control = FLOW_MODE_SYM_OR_REM;
2316 	skge->duplex = -1;
2317 	skge->speed = -1;
2318 	skge->advertising = skge_supported_modes(hw);
2319 
2320 	hw->dev[port] = dev;
2321 
2322 	skge->port = port;
2323 
2324 	/* read the mac address */
2325 	memcpy(dev->hw_addr, (void *) (hw->regs + B2_MAC_1 + port*8), ETH_ALEN);
2326 
2327 	/* device is off until link detection */
2328 	netdev_link_down(dev);
2329 
2330 	return dev;
2331 }
2332 
2333 static void skge_show_addr(struct net_device *dev)
2334 {
2335 	DBG2(PFX "%s: addr %s\n",
2336 	     dev->name, netdev_addr(dev));
2337 }
2338 
2339 static int skge_probe(struct pci_device *pdev,
2340 				const struct pci_device_id *ent __unused)
2341 {
2342 	struct net_device *dev, *dev1;
2343 	struct skge_hw *hw;
2344 	int err, using_dac = 0;
2345 
2346 	adjust_pci_device(pdev);
2347 
2348 	err = -ENOMEM;
2349 	hw = zalloc(sizeof(*hw));
2350 	if (!hw) {
2351 		DBG(PFX "cannot allocate hardware struct\n");
2352 		goto err_out_free_regions;
2353 	}
2354 
2355 	hw->pdev = pdev;
2356 
2357 	hw->regs = (u32)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0),
2358 				SKGE_REG_SIZE);
2359 	if (!hw->regs) {
2360 		DBG(PFX "cannot map device registers\n");
2361 		goto err_out_free_hw;
2362 	}
2363 
2364 	err = skge_reset(hw);
2365 	if (err)
2366 		goto err_out_iounmap;
2367 
2368 	DBG(PFX " addr 0x%llx irq %d chip %s rev %d\n",
2369 	    (unsigned long long)pdev->ioaddr, pdev->irq,
2370 	    skge_board_name(hw), hw->chip_rev);
2371 
2372 	dev = skge_devinit(hw, 0, using_dac);
2373 	if (!dev)
2374 		goto err_out_led_off;
2375 
2376 	netdev_init ( dev, &skge_operations );
2377 
2378 	err = register_netdev(dev);
2379 	if (err) {
2380 		DBG(PFX "cannot register net device\n");
2381 		goto err_out_free_netdev;
2382 	}
2383 
2384 	skge_show_addr(dev);
2385 
2386 	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
2387 		if (register_netdev(dev1) == 0)
2388 			skge_show_addr(dev1);
2389 		else {
2390 			/* Failure to register second port need not be fatal */
2391 			DBG(PFX "register of second port failed\n");
2392 			hw->dev[1] = NULL;
2393 			netdev_nullify(dev1);
2394 			netdev_put(dev1);
2395 		}
2396 	}
2397 	pci_set_drvdata(pdev, hw);
2398 
2399 	return 0;
2400 
2401 err_out_free_netdev:
2402 	netdev_nullify(dev);
2403 	netdev_put(dev);
2404 err_out_led_off:
2405 	skge_write16(hw, B0_LED, LED_STAT_OFF);
2406 err_out_iounmap:
2407 	iounmap((void*)hw->regs);
2408 err_out_free_hw:
2409 	free(hw);
2410 err_out_free_regions:
2411 	pci_set_drvdata(pdev, NULL);
2412 	return err;
2413 }
2414 
2415 static void skge_remove(struct pci_device *pdev)
2416 {
2417 	struct skge_hw *hw  = pci_get_drvdata(pdev);
2418 	struct net_device *dev0, *dev1;
2419 
2420 	if (!hw)
2421 		return;
2422 
2423 	if ((dev1 = hw->dev[1]))
2424 		unregister_netdev(dev1);
2425 	dev0 = hw->dev[0];
2426 	unregister_netdev(dev0);
2427 
2428 	hw->intr_mask = 0;
2429 	skge_write32(hw, B0_IMSK, 0);
2430 	skge_read32(hw, B0_IMSK);
2431 
2432 	skge_write16(hw, B0_LED, LED_STAT_OFF);
2433 	skge_write8(hw, B0_CTST, CS_RST_SET);
2434 
2435 	if (dev1) {
2436 		netdev_nullify(dev1);
2437 		netdev_put(dev1);
2438 	}
2439 	netdev_nullify(dev0);
2440 	netdev_put(dev0);
2441 
2442 	iounmap((void*)hw->regs);
2443 	free(hw);
2444 	pci_set_drvdata(pdev, NULL);
2445 }
2446 
2447 /*
2448  * Enable or disable IRQ masking.
2449  *
2450  * @v netdev		Device to control.
2451  * @v enable		Zero to mask off IRQ, non-zero to enable IRQ.
2452  *
2453  * This is a gPXE Network Driver API function.
2454  */
2455 static void skge_net_irq ( struct net_device *dev, int enable ) {
2456 	struct skge_port *skge = netdev_priv(dev);
2457 	struct skge_hw *hw = skge->hw;
2458 
2459 	if (enable)
2460 		hw->intr_mask |= portmask[skge->port];
2461 	else
2462 		hw->intr_mask &= ~portmask[skge->port];
2463 	skge_write32(hw, B0_IMSK, hw->intr_mask);
2464 }
2465 
2466 struct pci_driver skge_driver __pci_driver = {
2467 	.ids      = skge_id_table,
2468 	.id_count = ( sizeof (skge_id_table) / sizeof (skge_id_table[0]) ),
2469 	.probe    = skge_probe,
2470 	.remove   = skge_remove
2471 };
2472 
2473