/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * This driver is a port of the b44 Linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some ssb bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <byteswap.h>
#include <gpxe/io.h>
#include <mii.h>
#include <gpxe/iobuf.h>
#include <gpxe/malloc.h>
#include <gpxe/pci.h>
#include <gpxe/netdevice.h>
#include <gpxe/ethernet.h>
#include <gpxe/if_ether.h>
#include <gpxe/memmap.h>
#include "b44.h"

static inline int ring_next(int index)
{
	/* B44_RING_SIZE is a power of 2 :) */
	return (index + 1) & (B44_RING_SIZE - 1);
}
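
/*
 * Because B44_RING_SIZE is a power of two, the AND above is equivalent
 * to (index + 1) % B44_RING_SIZE without a division: e.g. in a
 * 512-entry ring, ring_next(510) == 511 and ring_next(511) == 0.
 */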


/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
	return readl(bp->regs + reg);
}


static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
	writel(val, bp->regs + reg);
}


static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
	readl(bp->regs + reg);
	udelay(timeout);
}


#define VIRT_TO_B44(addr)	( virt_to_bus(addr) + SB_PCI_DMA )
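
/*
 * The Ethernet core reaches host memory through the SSB-to-PCI DMA
 * window, so every bus address handed to the chip must be offset by
 * SB_PCI_DMA. This is also why the driver insists on all installed RAM
 * sitting below B44_30BIT_DMA_MASK (see phys_ram_within_limit() and
 * b44_probe()).
 */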


/**
 * Return non-zero if the installed RAM lies entirely within the given
 * limit and zero if any of it is outside. Hopefully this check can be
 * removed soon, once proper DMA mapping support is available.
 */
int phys_ram_within_limit(u64 limit)
{
	struct memory_map memmap;
	struct memory_region *highest = NULL;
	get_memmap(&memmap);

	highest = &memmap.regions[memmap.count - 1];

	return (highest->end < limit);
}


/**
 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
	u32 pending = br32(bp, B44_DMATX_STAT);
	pending &= DMATX_STAT_CDMASK;

	pending /= sizeof(struct dma_desc);
	return pending & (B44_RING_SIZE - 1);
}


/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
	u32 pending = br32(bp, B44_DMARX_STAT);
	pending &= DMARX_STAT_CDMASK;

	pending /= sizeof(struct dma_desc);
	return pending & (B44_RING_SIZE - 1);
}
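
/*
 * Note that both DMA status registers report the current descriptor as
 * a byte offset into the ring, which is why the value is divided by the
 * descriptor size before being masked down to a ring index.
 */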


/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
			unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;

		if (!clear && (val & bit))
			break;

		udelay(10);
	}
	if (i == timeout) {
		return -ENODEV;
	}
	return 0;
}
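
/*
 * Example: b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1)
 * (as used in b44_chip_reset() below) polls until the MAC clears the
 * disable bit, giving up with -ENODEV after roughly 200 * 10us.
 */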


/*
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specs.
 */
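
/*
 * Core switching works by re-pointing the BAR0 window (the SSB_BAR0_WIN
 * register in PCI configuration space) at the base address of the
 * target core, as ssb_pci_setup() below does temporarily for the PCI
 * core.
 */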

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}


static inline int ssb_is_core_up(struct b44_private *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
	                                                == SBTMSLOW_CLOCK);
}


static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
	                       BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}


static void ssb_core_disable(struct b44_private *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
	                                        SSB_CORE_DOWN));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
	bflush(bp, B44_SBTMSLOW, 1);
}


static void ssb_core_reset(struct b44_private *bp)
{
	u32 val;
	const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

	ssb_core_disable(bp);

	bw32(bp, B44_SBTMSLOW, mask);
	bflush(bp, B44_SBTMSLOW, 1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_BAD)) {
		bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
	}

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	bflush(bp, B44_SBTMSLOW, 1);
}
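
/*
 * Together these implement the usual SSB reset handshake: reject new
 * backplane transactions and wait for the core to go idle, hold the
 * core in reset with its clock forced on, clear any latched error
 * state, then release reset while keeping the clock running.
 */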


/*
 * Driver helper functions
 */

/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid hanging the chip for an unnecessarily long time in
 * that case.
 *
 * Called-by: b44_close, b44_halt, b44_init_hw (via b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);

		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);

		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

		bw32(bp, B44_DMATX_CTRL, 0);

		bp->tx_dirty = bp->tx_cur = 0;

		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
			                                          100, 0);

		bw32(bp, B44_DMARX_CTRL, 0);

		bp->rx_cur = 0;
	} else {
		ssb_pci_setup(bp, SBINTVEC_ENET0);
	}

	ssb_core_reset(bp);

	/* Don't enable PHY if we are only doing a partial reset. */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL,
	     (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
	bflush(bp, B44_MDIO_CTRL, 1);

	/* Enable internal or external PHY */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		bflush(bp, B44_ENET_CTRL, 1);
	} else {
		u32 val = br32(bp, B44_DEVCTRL);
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			bflush(bp, B44_DEVCTRL, 100);
		}
	}
}


/**
 * Called by b44_poll in the error path.
 */
static void b44_halt(struct b44_private *bp)
{
	/* disable ints */
	bw32(bp, B44_IMASK, 0);
	bflush(bp, B44_IMASK, 1);

	DBG("b44: powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	/*
	 * Now reset the chip, but without enabling the MAC & PHY
	 * part of it. This has to be done _after_ we shut down the PHY.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}



/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
	u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	b44_set_rx_mode(bp->netdev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
		bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
		bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}


/***  Management of ring descriptors  ***/


static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
	struct rx_header *rh;
	u32 ctrl, addr;

	rh = bp->rx_iobuf[idx]->data;
	rh->len = 0;
	rh->flags = 0;
	ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
	if (idx == B44_RING_LAST) {
		ctrl |= DESC_CTRL_EOT;
	}
	addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

	bp->rx[idx].ctrl = cpu_to_le32(ctrl);
	bp->rx[idx].addr = cpu_to_le32(addr);
	bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}
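
/*
 * DESC_CTRL_EOT on the last descriptor marks the end of the descriptor
 * table, telling the DMA engine to wrap back to the start of the ring
 * rather than read past it.
 */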


/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
	u32 i;

	/* Refill every empty slot except the one at 'pending' itself */
	for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
		if (bp->rx_iobuf[i] != NULL)
			continue;

		bp->rx_iobuf[i] = alloc_iob(RX_PKT_BUF_SZ);
		if (!bp->rx_iobuf[i]) {
			DBG("Refill rx ring failed!!\n");
			break;
		}

		b44_populate_rx_descriptor(bp, i);
	}
}


static void b44_free_rx_ring(struct b44_private *bp)
{
	u32 i;

	if (bp->rx) {
		for (i = 0; i < B44_RING_SIZE; i++) {
			free_iob(bp->rx_iobuf[i]);
			bp->rx_iobuf[i] = NULL;
		}
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		bp->rx = NULL;
	}
}


static int b44_init_rx_ring(struct b44_private *bp)
{
	b44_free_rx_ring(bp);

	bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->rx)
		return -ENOMEM;

	memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

	/* Guard against a NULL dereference if the first buffer
	 * allocation fails */
	bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
	if (!bp->rx_iobuf[0]) {
		b44_free_rx_ring(bp);
		return -ENOMEM;
	}

	b44_populate_rx_descriptor(bp, 0);
	b44_rx_refill(bp, 0);

	DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
	return 0;
}


static void b44_free_tx_ring(struct b44_private *bp)
{
	if (bp->tx) {
		free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
		bp->tx = NULL;
	}
}


static int b44_init_tx_ring(struct b44_private *bp)
{
	b44_free_tx_ring(bp);

	bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->tx)
		return -ENOMEM;

	memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
	memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

	DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
	return 0;
}


/*** Interaction with the PHY ***/


static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
	int err;

	u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 argv = arg1 | arg2 | arg3 | arg4;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}


static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
	u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 arg5 = (val & MDIO_DATA_DATA);
	u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
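
/*
 * Both accessors assemble what is in effect an MII management frame
 * (opcode, PHY address, register address, turnaround, data) in the
 * B44_MDIO_DATA register, then wait for the EMAC to signal completion
 * through the EMAC_INT_MII bit of its interrupt status register.
 */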


static int b44_phy_reset(struct b44_private *bp)
{
	u32 val;
	int err;

	err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	udelay(100);
	err = b44_phy_read(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			return -ENODEV;
		}
	}

	return 0;
}


/*
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * and PHY address.
 */
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
			  int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);

	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) | (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);

	val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
	bw32(bp, B44_CAM_CTRL, val);

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
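
/*
 * Example: for the MAC address 00:11:22:33:44:55 the entry is written
 * as CAM_DATA_LO = 0x22334455 and CAM_DATA_HI = 0x0011 (plus the valid
 * bit), i.e. the first two address bytes go into the high register.
 */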


static void b44_set_mac_addr(struct b44_private *bp)
{
	u32 val;
	bw32(bp, B44_CAM_CTRL, 0);
	b44_cam_write(bp, bp->netdev->ll_addr, 0);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}


/* Read 128 bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}


static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
	u8 eeprom[128];

	/* Load MAC address, note byteswapping */
	b44_read_eeprom(bp, &eeprom[0]);
	bp->netdev->hw_addr[0] = eeprom[79];
	bp->netdev->hw_addr[1] = eeprom[78];
	bp->netdev->hw_addr[2] = eeprom[81];
	bp->netdev->hw_addr[3] = eeprom[80];
	bp->netdev->hw_addr[4] = eeprom[83];
	bp->netdev->hw_addr[5] = eeprom[82];

	/* Load PHY address */
	bp->phy_addr = eeprom[90] & 0x1f;
}
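
/*
 * The EEPROM is read as 16-bit words, so the two bytes of each word
 * arrive swapped relative to the MAC address byte order; that is why
 * hw_addr[0] comes from offset 79 rather than 78.
 */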


static void b44_set_rx_mode(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
	u32 val;
	int i;

	val = br32(bp, B44_RXCONFIG);
	val &= ~RXCONFIG_PROMISC;
	val |= RXCONFIG_ALLMULTI;

	b44_set_mac_addr(bp);

	for (i = 1; i < 64; i++)
		b44_cam_write(bp, zero, i);

	bw32(bp, B44_RXCONFIG, val);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
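
/*
 * The receiver ends up in all-multicast (but not promiscuous) mode,
 * with CAM slot 0 holding our own MAC address and the remaining 63
 * slots cleared.
 */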


/*** Implementation of gPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci	PCI device
 * @v id	Matching entry in ID table
 * @ret rc	Return status code
 */
static int b44_probe(struct pci_device *pci, const struct pci_device_id *id)
{
	struct net_device *netdev;
	struct b44_private *bp;
	int rc;

	/*
	 * Bail out if more than 1GB of physical RAM is installed.
	 * This limitation will be removed later when dma mapping
	 * is merged into mainline.
	 */
	if (!phys_ram_within_limit(B44_30BIT_DMA_MASK)) {
		DBG("Sorry, this version of the driver does not\n"
		    "support systems with more than 1GB of RAM.\n");
		return -ENOMEM;
	}

	/* Set up netdev */
	netdev = alloc_etherdev(sizeof(*bp));
	if (!netdev)
		return -ENOMEM;

	netdev_init(netdev, &b44_operations);
	pci_set_drvdata(pci, netdev);
	netdev->dev = &pci->dev;

	/* Set up private data */
	bp = netdev_priv(netdev);
	memset(bp, 0, sizeof(*bp));
	bp->netdev = netdev;
	bp->pci = pci;

	/* Map device registers */
	bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
	if (!bp->regs) {
		netdev_put(netdev);
		return -ENOMEM;
	}

	/* Enable PCI bus mastering */
	adjust_pci_device(pci);

	b44_load_mac_and_phy_addr(bp);

	/* Link management currently not implemented */
	netdev_link_up(netdev);

	rc = register_netdev(netdev);
	if (rc != 0) {
		iounmap(bp->regs);
		netdev_put(netdev);
		return rc;
	}

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", id->name, id->vendor,
	    id->device, bp->regs, eth_ntoa(netdev->ll_addr));

	return 0;
}


/**
 * Remove device
 *
 * @v pci	PCI device
 */
static void b44_remove(struct pci_device *pci)
{
	struct net_device *netdev = pci_get_drvdata(pci);
	struct b44_private *bp = netdev_priv(netdev);

	ssb_core_disable(bp);
	unregister_netdev(netdev);
	iounmap(bp->regs);
	netdev_nullify(netdev);
	netdev_put(netdev);
}


/** Enable or disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
	struct b44_private *bp = netdev_priv(netdev);

	/* Interrupt mask specifies which events generate interrupts */
	bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}


/** Open network device
 *
 * @v netdev	Network device
 * @ret rc	Return status code
 */
static int b44_open(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	int rc;

	rc = b44_init_tx_ring(bp);
	if (rc != 0)
		return rc;

	rc = b44_init_rx_ring(bp);
	if (rc != 0)
		return rc;

	b44_init_hw(bp, B44_FULL_RESET);

	/* Disable interrupts */
	b44_irq(netdev, 0);

	return 0;
}


/** Close network device
 *
 * @v netdev	Network device
 */
static void b44_close(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);

	b44_chip_reset(bp, B44_FULL_RESET);
	b44_free_tx_ring(bp);
	b44_free_rx_ring(bp);
}


/** Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 cur = bp->tx_cur;
	u32 ctrl;

	/* Check for TX ring overflow */
	if (bp->tx[cur].ctrl) {
		DBG("tx overflow\n");
		return -ENOBUFS;
	}

	/* Will call netdev_tx_complete() on the iobuf later */
	bp->tx_iobuf[cur] = iobuf;

	/* Set up TX descriptor */
	ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
	    DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

	if (cur == B44_RING_LAST)
		ctrl |= DESC_CTRL_EOT;

	bp->tx[cur].ctrl = cpu_to_le32(ctrl);
	bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

	/* Update next available descriptor index */
	cur = ring_next(cur);
	bp->tx_cur = cur;
	wmb();

	/* Tell card that a new TX descriptor is ready */
	bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
	return 0;
}
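
/*
 * Note that B44_DMATX_PTR is written with the byte offset of the first
 * descriptor the hardware does not own, i.e. one past the frame just
 * queued; the chip processes descriptors up to, but excluding, that
 * offset.
 */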


/** Recycle sent TX descriptors and notify the network stack
 *
 * @v bp	Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
	u32 cur, i;

	cur = pending_tx_index(bp);

	for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
		/* Free finished frame */
		netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
		bp->tx_iobuf[i] = NULL;

		/* Clear TX descriptor */
		bp->tx[i].ctrl = 0;
		bp->tx[i].addr = 0;
	}
	bp->tx_dirty = cur;
}


static void b44_process_rx_packets(struct b44_private *bp)
{
	struct io_buffer *iob;	/* received data */
	struct rx_header *rh;
	u32 pending, i;
	u16 len;

	pending = pending_rx_index(bp);

	for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
		iob = bp->rx_iobuf[i];
		if (iob == NULL)
			break;

		rh = iob->data;
		len = le16_to_cpu(rh->len);

		/*
		 * Guard against incompletely written RX descriptors.
		 * Without this, things can get really slow!
		 */
		if (len == 0)
			break;

		/* Discard CRC that is generated by the card */
		len -= 4;

		/* Check for invalid packets and errors */
		if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
			DBG("rx error len=%d flags=%04x\n", len,
			                 le16_to_cpu(rh->flags));
			rh->len = 0;
			rh->flags = 0;
			/* netdev_rx_err() frees the buffer, so drop our
			 * reference and let b44_rx_refill() replace it */
			bp->rx_iobuf[i] = NULL;
			netdev_rx_err(bp->netdev, iob, -EINVAL);
			continue;
		}

		/* Clear RX descriptor */
		rh->len = 0;
		rh->flags = 0;
		bp->rx_iobuf[i] = NULL;

		/* Hand off the IO buffer to the network stack */
		iob_reserve(iob, RX_PKT_OFFSET);
		iob_put(iob, len);
		netdev_rx(bp->netdev, iob);
	}
	bp->rx_cur = i;
	b44_rx_refill(bp, pending_rx_index(bp));
}


/** Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void b44_poll(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 istat;

	/* Interrupt status */
	istat = br32(bp, B44_ISTAT);
	istat &= IMASK_DEF;	/* only the events we care about */

	if (!istat)
		return;
	if (istat & ISTAT_TX)
		b44_tx_complete(bp);
	if (istat & ISTAT_RX)
		b44_process_rx_packets(bp);
	if (istat & ISTAT_ERRORS) {
		DBG("b44 error istat=0x%08x\n", istat);

		/* Reset B44 core partially to avoid long waits */
		b44_irq(bp->netdev, 0);
		b44_halt(bp);
		b44_init_tx_ring(bp);
		b44_init_rx_ring(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
	}

	/* Acknowledge interrupt */
	bw32(bp, B44_ISTAT, 0);
	bflush(bp, B44_ISTAT, 1);
}


static struct net_device_operations b44_operations = {
	.open = b44_open,
	.close = b44_close,
	.transmit = b44_transmit,
	.poll = b44_poll,
	.irq = b44_irq,
};


static struct pci_device_id b44_nics[] = {
	PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
	PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
	PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};


struct pci_driver b44_driver __pci_driver = {
	.ids = b44_nics,
	.id_count = sizeof b44_nics / sizeof b44_nics[0],
	.probe = b44_probe,
	.remove = b44_remove,
};
952