1 /* $Id$
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002 Jeff Garzik (jgarzik@mandrakesoft.com)
6 * Copyright (C) 2003 Eric Biederman (ebiederman@lnxi.com) [etherboot port]
7 */
8
9 FILE_LICENCE ( GPL2_ONLY );
10
11 /* 11-13-2003 timlegge Fix Issue with NetGear GA302T
12 * 11-18-2003 ebiederm Generalize NetGear Fix to what the code was supposed to be.
13 * 01-06-2005 Alf (Frederic Olivie) Add Dell bcm 5751 (0x1677) support
14 * 04-15-2005 Martin Vogt Add Fujitsu Siemens Computer (FSC) 0x1734 bcm 5751 0x105d support
15 */
16
17 #include "etherboot.h"
18 #include "nic.h"
19 #include <errno.h>
20 #include <gpxe/pci.h>
21 #include <gpxe/ethernet.h>
22 #include "string.h"
23 #include <mii.h>
24 #include "tg3.h"
25
26 #define SUPPORT_COPPER_PHY 1
27 #define SUPPORT_FIBER_PHY 1
28 #define SUPPORT_LINK_REPORT 1
29 #define SUPPORT_PARTNO_STR 1
30 #define SUPPORT_PHY_STR 1
31
32 static struct tg3 tg3;
33
34 /* These numbers seem to be hard coded in the NIC firmware somehow.
35 * You can't change the ring sizes, but you can change where you place
36 * them in the NIC onboard memory.
37 */
38 #define TG3_RX_RING_SIZE 512
39 #define TG3_DEF_RX_RING_PENDING 20 /* RX_RING_PENDING seems to be o.k. at 20 and 200 */
40 #define TG3_RX_RCB_RING_SIZE 1024
41
42 /* (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ? \
43 512 : 1024) */
44 #define TG3_TX_RING_SIZE 512
45 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
46
47 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RING_SIZE)
48 #define TG3_RX_RCB_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RCB_RING_SIZE)
49
50 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * TG3_TX_RING_SIZE)
51 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
52 #define PREV_TX(N) (((N) - 1) & (TG3_TX_RING_SIZE - 1))
53
54 #define RX_PKT_BUF_SZ (1536 + 2 + 64)
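/* Each receive buffer holds a full 1536-byte Ethernet frame plus 2 bytes of
 * headroom and 64 bytes of trailing slack.  tg3_init_rings() points each
 * descriptor 2 bytes into its buffer (so the IP header lands 4-byte aligned)
 * and subtracts the 2 + 64 again when programming the descriptor length; the
 * 64 bytes are presumably cache-line/DMA slack.
 */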
55
56 struct eth_frame {
57 uint8_t dst_addr[ETH_ALEN];
58 uint8_t src_addr[ETH_ALEN];
59 uint16_t type;
60 uint8_t data [ETH_FRAME_LEN - ETH_HLEN];
61 };
62
63 struct bss {
64 struct tg3_rx_buffer_desc rx_std[TG3_RX_RING_SIZE];
65 struct tg3_rx_buffer_desc rx_rcb[TG3_RX_RCB_RING_SIZE];
66 struct tg3_tx_buffer_desc tx_ring[TG3_TX_RING_SIZE];
67 struct tg3_hw_status hw_status;
68 struct tg3_hw_stats hw_stats;
69 unsigned char rx_bufs[TG3_DEF_RX_RING_PENDING][RX_PKT_BUF_SZ];
70 struct eth_frame tx_frame[2];
71 } tg3_bss __shared;
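/* All DMA-visible state (descriptor rings, status and statistics blocks, and
 * the receive buffers themselves) lives in this single statically allocated,
 * shared BSS block; the boot-time driver never allocates DMA memory
 * dynamically, it simply hands the NIC bus addresses inside tg3_bss.
 */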
72
73 /**
74 * pci_save_state - save the PCI configuration space of a device before suspending
75 * @dev: - PCI device that we're dealing with
76 * @buffer: - buffer to hold config space context
77 *
78 * @buffer must be large enough to hold the entire PCI 2.2 config space
79 * (>= 64 bytes).
80 */
81 static int pci_save_state(struct pci_device *dev, uint32_t *buffer)
82 {
83 int i;
84 for (i = 0; i < 16; i++)
85 pci_read_config_dword(dev, i * 4,&buffer[i]);
86 return 0;
87 }
88
89 /**
90 * pci_restore_state - Restore the saved state of a PCI device
91 * @dev: - PCI device that we're dealing with
92 * @buffer: - saved PCI config space
93 *
94 */
95 static int pci_restore_state(struct pci_device *dev, uint32_t *buffer)
96 {
97 int i;
98
99 for (i = 0; i < 16; i++)
100 pci_write_config_dword(dev,i * 4, buffer[i]);
101 return 0;
102 }
103
104 static void tg3_write_indirect_reg32(uint32_t off, uint32_t val)
105 {
106 pci_write_config_dword(tg3.pdev, TG3PCI_REG_BASE_ADDR, off);
107 pci_write_config_dword(tg3.pdev, TG3PCI_REG_DATA, val);
108 }
109
110 #define tw32(reg,val) tg3_write_indirect_reg32((reg),(val))
111 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tg3.regs + (reg))
112 #define tw16(reg,val) writew(((val) & 0xffff), tg3.regs + (reg))
113 #define tw8(reg,val) writeb(((val) & 0xff), tg3.regs + (reg))
114 #define tr32(reg) readl(tg3.regs + (reg))
115 #define tr16(reg) readw(tg3.regs + (reg))
116 #define tr8(reg) readb(tg3.regs + (reg))
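/* Register access convention: plain register writes (tw32) go indirectly
 * through the PCI configuration window (TG3PCI_REG_BASE_ADDR/REG_DATA), which
 * also sidesteps the 5701 write-reordering issue noted in tg3_chip_reset();
 * reads and mailbox writes use the memory-mapped BAR at tg3.regs.  The
 * *_carefully variants below write, read the register back to flush the
 * posted write, then delay briefly so the hardware has time to react.
 */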
117
118 static void tw32_carefully(uint32_t reg, uint32_t val)
119 {
120 tw32(reg, val);
121 tr32(reg);
122 udelay(100);
123 }
124
125 static void tw32_mailbox2(uint32_t reg, uint32_t val)
126 {
127 tw32_mailbox(reg, val);
128 tr32(reg);
129 }
130
131 static void tg3_write_mem(uint32_t off, uint32_t val)
132 {
133 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
134 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);
135
136 /* Always leave this as zero. */
137 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
138 }
139
140 static void tg3_read_mem(uint32_t off, uint32_t *val)
141 {
142 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
143 pci_read_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);
144
145 /* Always leave this as zero. */
146 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
147 }
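/* tg3_write_mem()/tg3_read_mem() reach NIC on-chip SRAM through a second
 * config-space window: MEM_WIN_BASE_ADDR selects the SRAM offset,
 * MEM_WIN_DATA moves the data, and the base is always returned to zero
 * afterwards so later accesses see a predictable window.
 */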
148
149 static void tg3_disable_ints(struct tg3 *tp)
150 {
151 tw32(TG3PCI_MISC_HOST_CTRL,
152 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
153 tw32_mailbox2(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
154 }
155
156 static void tg3_switch_clocks(struct tg3 *tp)
157 {
158 uint32_t orig_clock_ctrl, clock_ctrl;
159
160 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
161
162 orig_clock_ctrl = clock_ctrl;
163 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE | 0x1f);
164 tp->pci_clock_ctrl = clock_ctrl;
165
166 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
167 (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
168 && (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) &&
169 (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE)!=0) {
170 tw32_carefully(TG3PCI_CLOCK_CTRL,
171 clock_ctrl | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
172 tw32_carefully(TG3PCI_CLOCK_CTRL,
173 clock_ctrl | (CLOCK_CTRL_ALTCLK));
174 }
175 tw32_carefully(TG3PCI_CLOCK_CTRL, clock_ctrl);
176 }
177
178 #define PHY_BUSY_LOOPS 5000
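/* MII access pattern used below: auto-polling is paused, an MI_COM frame is
 * built from the PHY address, register number and a READ or WRITE command,
 * and the BUSY bit is then polled for up to PHY_BUSY_LOOPS iterations of
 * ~10us each (roughly 50ms) before giving up with -EBUSY.
 */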
179
180 static int tg3_readphy(struct tg3 *tp, int reg, uint32_t *val)
181 {
182 uint32_t frame_val;
183 int loops, ret;
184
185 tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
186
187 *val = 0xffffffff;
188
189 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
190 MI_COM_PHY_ADDR_MASK);
191 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
192 MI_COM_REG_ADDR_MASK);
193 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
194
195 tw32_carefully(MAC_MI_COM, frame_val);
196
197 loops = PHY_BUSY_LOOPS;
198 while (loops-- > 0) {
199 udelay(10);
200 frame_val = tr32(MAC_MI_COM);
201
202 if ((frame_val & MI_COM_BUSY) == 0) {
203 udelay(5);
204 frame_val = tr32(MAC_MI_COM);
205 break;
206 }
207 }
208
209 ret = -EBUSY;
210 if (loops > 0) {
211 *val = frame_val & MI_COM_DATA_MASK;
212 ret = 0;
213 }
214
215 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
216
217 return ret;
218 }
219
220 static int tg3_writephy(struct tg3 *tp, int reg, uint32_t val)
221 {
222 uint32_t frame_val;
223 int loops, ret;
224
225 tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
226
227 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
228 MI_COM_PHY_ADDR_MASK);
229 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
230 MI_COM_REG_ADDR_MASK);
231 frame_val |= (val & MI_COM_DATA_MASK);
232 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
233
234 tw32_carefully(MAC_MI_COM, frame_val);
235
236 loops = PHY_BUSY_LOOPS;
237 while (loops-- > 0) {
238 udelay(10);
239 frame_val = tr32(MAC_MI_COM);
240 if ((frame_val & MI_COM_BUSY) == 0) {
241 udelay(5);
242 frame_val = tr32(MAC_MI_COM);
243 break;
244 }
245 }
246
247 ret = -EBUSY;
248 if (loops > 0)
249 ret = 0;
250
251 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
252
253 return ret;
254 }
255
256 static int tg3_writedsp(struct tg3 *tp, uint16_t addr, uint16_t val)
257 {
258 int err;
259 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, addr);
260 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
261 return err;
262 }
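/* The PHY's DSP registers are reached through an address/data pair:
 * MII_TG3_DSP_ADDRESS selects the internal register and MII_TG3_DSP_RW_PORT
 * carries the value, e.g. the tg3_writedsp(tp, 0x201f, 0x0a20) call in
 * tg3_init_5401phy_dsp().
 */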
263
264
265 static void tg3_phy_set_wirespeed(struct tg3 *tp)
266 {
267 uint32_t val;
268
269 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
270 return;
271
272 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
273 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
274 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
275 }
276
277 static int tg3_bmcr_reset(struct tg3 *tp)
278 {
279 uint32_t phy_control;
280 int limit, err;
281
282 /* OK, reset it, and poll the BMCR_RESET bit until it
283 * clears or we time out.
284 */
285 phy_control = BMCR_RESET;
286 err = tg3_writephy(tp, MII_BMCR, phy_control);
287 if (err != 0)
288 return -EBUSY;
289
290 limit = 5000;
291 while (limit--) {
292 err = tg3_readphy(tp, MII_BMCR, &phy_control);
293 if (err != 0)
294 return -EBUSY;
295
296 if ((phy_control & BMCR_RESET) == 0) {
297 udelay(40);
298 break;
299 }
300 udelay(10);
301 }
302 if (limit <= 0)
303 return -EBUSY;
304
305 return 0;
306 }
307
308 static int tg3_wait_macro_done(struct tg3 *tp)
309 {
310 int limit = 100;
311
312 while (limit--) {
313 uint32_t tmp32;
314
315 tg3_readphy(tp, 0x16, &tmp32);
316 if ((tmp32 & 0x1000) == 0)
317 break;
318 }
319 if (limit <= 0)
320 return -EBUSY;
321
322 return 0;
323 }
324
325 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
326 {
327 static const uint32_t test_pat[4][6] = {
328 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
329 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
330 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
331 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
332 };
333 int chan;
334
335 for (chan = 0; chan < 4; chan++) {
336 int i;
337
338 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
339 (chan * 0x2000) | 0x0200);
340 tg3_writephy(tp, 0x16, 0x0002);
341
342 for (i = 0; i < 6; i++)
343 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
344 test_pat[chan][i]);
345
346 tg3_writephy(tp, 0x16, 0x0202);
347 if (tg3_wait_macro_done(tp)) {
348 *resetp = 1;
349 return -EBUSY;
350 }
351
352 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
353 (chan * 0x2000) | 0x0200);
354 tg3_writephy(tp, 0x16, 0x0082);
355 if (tg3_wait_macro_done(tp)) {
356 *resetp = 1;
357 return -EBUSY;
358 }
359
360 tg3_writephy(tp, 0x16, 0x0802);
361 if (tg3_wait_macro_done(tp)) {
362 *resetp = 1;
363 return -EBUSY;
364 }
365
366 for (i = 0; i < 6; i += 2) {
367 uint32_t low, high;
368
369 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
370 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
371 if (tg3_wait_macro_done(tp)) {
372 *resetp = 1;
373 return -EBUSY;
374 }
375 low &= 0x7fff;
376 high &= 0x000f;
377 if (low != test_pat[chan][i] ||
378 high != test_pat[chan][i+1]) {
379 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
380 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
381 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
382
383 return -EBUSY;
384 }
385 }
386 }
387
388 return 0;
389 }
390
391 static int tg3_phy_reset_chanpat(struct tg3 *tp)
392 {
393 int chan;
394
395 for (chan = 0; chan < 4; chan++) {
396 int i;
397
398 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
399 (chan * 0x2000) | 0x0200);
400 tg3_writephy(tp, 0x16, 0x0002);
401 for (i = 0; i < 6; i++)
402 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
403 tg3_writephy(tp, 0x16, 0x0202);
404 if (tg3_wait_macro_done(tp))
405 return -EBUSY;
406 }
407
408 return 0;
409 }
410
411 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
412 {
413 uint32_t reg32, phy9_orig;
414 int retries, do_phy_reset, err;
415
416 retries = 10;
417 do_phy_reset = 1;
418 do {
419 if (do_phy_reset) {
420 err = tg3_bmcr_reset(tp);
421 if (err)
422 return err;
423 do_phy_reset = 0;
424 }
425
426 /* Disable transmitter and interrupt. */
427 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
428 reg32 |= 0x3000;
429 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
430
431 /* Set full-duplex, 1000 mbps. */
432 tg3_writephy(tp, MII_BMCR,
433 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
434
435 /* Set to master mode. */
436 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
437 tg3_writephy(tp, MII_TG3_CTRL,
438 (MII_TG3_CTRL_AS_MASTER |
439 MII_TG3_CTRL_ENABLE_AS_MASTER));
440
441 /* Enable SM_DSP_CLOCK and 6dB. */
442 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
443
444 /* Block the PHY control access. */
445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
446 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
447
448 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
449 if (!err)
450 break;
451 } while (--retries);
452
453 err = tg3_phy_reset_chanpat(tp);
454 if (err)
455 return err;
456
457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
459
460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
461 tg3_writephy(tp, 0x16, 0x0000);
462
463 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
464
465 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
466
467 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
468 reg32 &= ~0x3000;
469 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
470
471 return err;
472 }
473
474 /* This will reset the tigon3 PHY if there is no valid
475 * link.
476 */
477 static int tg3_phy_reset(struct tg3 *tp)
478 {
479 uint32_t phy_status;
480 int err;
481
482 err = tg3_readphy(tp, MII_BMSR, &phy_status);
483 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
484 if (err != 0)
485 return -EBUSY;
486
487 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
488 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
489 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
490 err = tg3_phy_reset_5703_4_5(tp);
491 if (err)
492 return err;
493 goto out;
494 }
495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
496 // Taken from Broadcom's source code
497 tg3_writephy(tp, 0x18, 0x0c00);
498 tg3_writephy(tp, 0x17, 0x000a);
499 tg3_writephy(tp, 0x15, 0x310b);
500 tg3_writephy(tp, 0x17, 0x201f);
501 tg3_writephy(tp, 0x15, 0x9506);
502 tg3_writephy(tp, 0x17, 0x401f);
503 tg3_writephy(tp, 0x15, 0x14e2);
504 tg3_writephy(tp, 0x18, 0x0400);
505 }
506 err = tg3_bmcr_reset(tp);
507 if (err)
508 return err;
509 out:
510 tg3_phy_set_wirespeed(tp);
511 return 0;
512 }
513
514 static void tg3_set_power_state_0(struct tg3 *tp)
515 {
516 uint16_t power_control;
517 int pm = tp->pm_cap;
518
519 /* Make sure register accesses (indirect or otherwise)
520 * will function correctly.
521 */
522 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
523
524 pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
525
526 power_control |= PCI_PM_CTRL_PME_STATUS;
527 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
528 power_control |= 0;
529 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
530
531 tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
532
533 return;
534 }
535
536
537 #if SUPPORT_LINK_REPORT
538 static void tg3_link_report(struct tg3 *tp)
539 {
540 if (!tp->carrier_ok) {
541 printf("Link is down.\n");
542 } else {
543 printf("Link is up at %d Mbps, %s duplex. %s %s %s\n",
544 (tp->link_config.active_speed == SPEED_1000 ?
545 1000 :
546 (tp->link_config.active_speed == SPEED_100 ?
547 100 : 10)),
548 (tp->link_config.active_duplex == DUPLEX_FULL ?
549 "full" : "half"),
550 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "TX" : "",
551 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "RX" : "",
552 (tp->tg3_flags & (TG3_FLAG_TX_PAUSE |TG3_FLAG_RX_PAUSE)) ? "flow control" : "");
553 }
554 }
555 #else
556 #define tg3_link_report(tp)
557 #endif
558
559 static void tg3_setup_flow_control(struct tg3 *tp, uint32_t local_adv, uint32_t remote_adv)
560 {
561 uint32_t new_tg3_flags = 0;
562
563 if (local_adv & ADVERTISE_PAUSE_CAP) {
564 if (local_adv & ADVERTISE_PAUSE_ASYM) {
565 if (remote_adv & LPA_PAUSE_CAP)
566 new_tg3_flags |=
567 (TG3_FLAG_RX_PAUSE |
568 TG3_FLAG_TX_PAUSE);
569 else if (remote_adv & LPA_PAUSE_ASYM)
570 new_tg3_flags |=
571 (TG3_FLAG_RX_PAUSE);
572 } else {
573 if (remote_adv & LPA_PAUSE_CAP)
574 new_tg3_flags |=
575 (TG3_FLAG_RX_PAUSE |
576 TG3_FLAG_TX_PAUSE);
577 }
578 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
579 if ((remote_adv & LPA_PAUSE_CAP) &&
580 (remote_adv & LPA_PAUSE_ASYM))
581 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
582 }
583
584 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
585 tp->tg3_flags |= new_tg3_flags;
586
587 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
588 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
589 else
590 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
591
592 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
593 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
594 else
595 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
596 }
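/* The pause resolution above follows the usual 802.3 Annex 28B rules: when
 * both ends advertise symmetric pause, flow control is enabled in both
 * directions; asymmetric advertisements resolve to RX-only or TX-only pause;
 * otherwise flow control stays off.
 */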
597
598 #if SUPPORT_COPPER_PHY
599 static void tg3_aux_stat_to_speed_duplex(
600 struct tg3 *tp __unused, uint32_t val, uint8_t *speed, uint8_t *duplex)
601 {
602 static const uint8_t map[] = {
603 [0] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
604 [MII_TG3_AUX_STAT_10HALF >> 8] = (SPEED_10 << 2) | DUPLEX_HALF,
605 [MII_TG3_AUX_STAT_10FULL >> 8] = (SPEED_10 << 2) | DUPLEX_FULL,
606 [MII_TG3_AUX_STAT_100HALF >> 8] = (SPEED_100 << 2) | DUPLEX_HALF,
607 [MII_TG3_AUX_STAT_100_4 >> 8] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
608 [MII_TG3_AUX_STAT_100FULL >> 8] = (SPEED_100 << 2) | DUPLEX_FULL,
609 [MII_TG3_AUX_STAT_1000HALF >> 8] = (SPEED_1000 << 2) | DUPLEX_HALF,
610 [MII_TG3_AUX_STAT_1000FULL >> 8] = (SPEED_1000 << 2) | DUPLEX_FULL,
611 };
612 uint8_t result;
613 result = map[(val & MII_TG3_AUX_STAT_SPDMASK) >> 8];
614 *speed = result >> 2;
615 *duplex = result & 3;
616 }
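/* Each map[] entry above packs a speed code in the upper bits and the duplex
 * code in the low two bits, indexed by the speed field of the PHY's AUX_STAT
 * register; unknown codes decode to SPEED_INVALID/DUPLEX_INVALID.
 */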
617
618 static int tg3_phy_copper_begin(struct tg3 *tp)
619 {
620 uint32_t new_adv;
621
622 tp->link_config.advertising =
623 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
624 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
625 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
626 ADVERTISED_Autoneg | ADVERTISED_MII);
627
628 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) {
629 tp->link_config.advertising &=
630 ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
631 }
632
633 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
634 if (tp->link_config.advertising & ADVERTISED_10baseT_Half) {
635 new_adv |= ADVERTISE_10HALF;
636 }
637 if (tp->link_config.advertising & ADVERTISED_10baseT_Full) {
638 new_adv |= ADVERTISE_10FULL;
639 }
640 if (tp->link_config.advertising & ADVERTISED_100baseT_Half) {
641 new_adv |= ADVERTISE_100HALF;
642 }
643 if (tp->link_config.advertising & ADVERTISED_100baseT_Full) {
644 new_adv |= ADVERTISE_100FULL;
645 }
646 tg3_writephy(tp, MII_ADVERTISE, new_adv);
647
648 if (tp->link_config.advertising &
649 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
650 new_adv = 0;
651 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) {
652 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
653 }
654 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) {
655 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
656 }
657 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
658 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
659 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
660 new_adv |= (MII_TG3_CTRL_AS_MASTER |
661 MII_TG3_CTRL_ENABLE_AS_MASTER);
662 }
663 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
664 } else {
665 tg3_writephy(tp, MII_TG3_CTRL, 0);
666 }
667
668 tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
669
670 return 0;
671 }
672
673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
674 {
675 int err;
676
677 /* Turn off tap power management. */
678 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);
679
680 err |= tg3_writedsp(tp, 0x0012, 0x1804);
681 err |= tg3_writedsp(tp, 0x0013, 0x1204);
682 err |= tg3_writedsp(tp, 0x8006, 0x0132);
683 err |= tg3_writedsp(tp, 0x8006, 0x0232);
684 err |= tg3_writedsp(tp, 0x201f, 0x0a20);
685
686 udelay(40);
687
688 return err;
689 }
690
691 static int tg3_setup_copper_phy(struct tg3 *tp)
692 {
693 int current_link_up;
694 uint32_t bmsr, dummy;
695 int i, err;
696
697 tw32_carefully(MAC_STATUS,
698 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED
699 | MAC_STATUS_LNKSTATE_CHANGED));
700
701 tp->mi_mode = MAC_MI_MODE_BASE;
702 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
703
704 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
705
706 /* Some third-party PHYs need to be reset on link going
707 * down.
708 */
709 if ( ( (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
710 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
711 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)) &&
712 (tp->carrier_ok)) {
713 tg3_readphy(tp, MII_BMSR, &bmsr);
714 tg3_readphy(tp, MII_BMSR, &bmsr);
715 if (!(bmsr & BMSR_LSTATUS))
716 tg3_phy_reset(tp);
717 }
718
719 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
720 tg3_readphy(tp, MII_BMSR, &bmsr);
721 tg3_readphy(tp, MII_BMSR, &bmsr);
722
723 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
724 bmsr = 0;
725
726 if (!(bmsr & BMSR_LSTATUS)) {
727 err = tg3_init_5401phy_dsp(tp);
728 if (err)
729 return err;
730
731 tg3_readphy(tp, MII_BMSR, &bmsr);
732 for (i = 0; i < 1000; i++) {
733 udelay(10);
734 tg3_readphy(tp, MII_BMSR, &bmsr);
735 if (bmsr & BMSR_LSTATUS) {
736 udelay(40);
737 break;
738 }
739 }
740
741 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
742 !(bmsr & BMSR_LSTATUS) &&
743 tp->link_config.active_speed == SPEED_1000) {
744 err = tg3_phy_reset(tp);
745 if (!err)
746 err = tg3_init_5401phy_dsp(tp);
747 if (err)
748 return err;
749 }
750 }
751 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
752 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
753 /* 5701 {A0,B0} CRC bug workaround */
754 tg3_writephy(tp, 0x15, 0x0a75);
755 tg3_writephy(tp, 0x1c, 0x8c68);
756 tg3_writephy(tp, 0x1c, 0x8d68);
757 tg3_writephy(tp, 0x1c, 0x8c68);
758 }
759
760 /* Clear pending interrupts... */
761 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
762 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
763
764 tg3_writephy(tp, MII_TG3_IMASK, ~0);
765
766 if (tp->led_mode == led_mode_three_link)
767 tg3_writephy(tp, MII_TG3_EXT_CTRL,
768 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
769 else
770 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
771
772 current_link_up = 0;
773
774 tg3_readphy(tp, MII_BMSR, &bmsr);
775 tg3_readphy(tp, MII_BMSR, &bmsr);
776
777 if (bmsr & BMSR_LSTATUS) {
778 uint32_t aux_stat, bmcr;
779
780 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
781 for (i = 0; i < 2000; i++) {
782 udelay(10);
783 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
784 if (aux_stat)
785 break;
786 }
787
788 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
789 &tp->link_config.active_speed,
790 &tp->link_config.active_duplex);
791 tg3_readphy(tp, MII_BMCR, &bmcr);
792 tg3_readphy(tp, MII_BMCR, &bmcr);
793 if (bmcr & BMCR_ANENABLE) {
794 uint32_t gig_ctrl;
795
796 current_link_up = 1;
797
798 /* Force autoneg restart if we are exiting
799 * low power mode.
800 */
801 tg3_readphy(tp, MII_TG3_CTRL, &gig_ctrl);
802 if (!(gig_ctrl & (MII_TG3_CTRL_ADV_1000_HALF |
803 MII_TG3_CTRL_ADV_1000_FULL))) {
804 current_link_up = 0;
805 }
806 } else {
807 current_link_up = 0;
808 }
809 }
810
811 if (current_link_up == 1 &&
812 (tp->link_config.active_duplex == DUPLEX_FULL)) {
813 uint32_t local_adv, remote_adv;
814
815 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
816 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
817
818 tg3_readphy(tp, MII_LPA, &remote_adv);
819 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
820
821 /* If we are not advertising full pause capability,
822 * something is wrong. Bring the link down and reconfigure.
823 */
824 if (local_adv != ADVERTISE_PAUSE_CAP) {
825 current_link_up = 0;
826 } else {
827 tg3_setup_flow_control(tp, local_adv, remote_adv);
828 }
829 }
830
831 if (current_link_up == 0) {
832 uint32_t tmp;
833
834 tg3_phy_copper_begin(tp);
835
836 tg3_readphy(tp, MII_BMSR, &tmp);
837 tg3_readphy(tp, MII_BMSR, &tmp);
838 if (tmp & BMSR_LSTATUS)
839 current_link_up = 1;
840 }
841
842 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
843 if (current_link_up == 1) {
844 if (tp->link_config.active_speed == SPEED_100 ||
845 tp->link_config.active_speed == SPEED_10)
846 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
847 else
848 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
849 } else
850 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
851
852 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
853 if (tp->link_config.active_duplex == DUPLEX_HALF)
854 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
855
856 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
858 if ((tp->led_mode == led_mode_link10) ||
859 (current_link_up == 1 &&
860 tp->link_config.active_speed == SPEED_10))
861 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
862 } else {
863 if (current_link_up == 1)
864 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
865 tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
866 }
867
868 /* ??? Without this setting Netgear GA302T PHY does not
869 * ??? send/receive packets...
870 * With it set, other PHYs cannot bring up the link.
871 */
872 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
873 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
874 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
875 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
876 }
877
878 tw32_carefully(MAC_MODE, tp->mac_mode);
879
880 /* Link change polled. */
881 tw32_carefully(MAC_EVENT, 0);
882
883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
884 current_link_up == 1 &&
885 tp->link_config.active_speed == SPEED_1000 &&
886 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
887 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
888 udelay(120);
889 tw32_carefully(MAC_STATUS,
890 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
891 tg3_write_mem(
892 NIC_SRAM_FIRMWARE_MBOX,
893 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
894 }
895
896 if (current_link_up != tp->carrier_ok) {
897 tp->carrier_ok = current_link_up;
898 tg3_link_report(tp);
899 }
900
901 return 0;
902 }
903 #else
904 #define tg3_setup_copper_phy(TP) (-EINVAL)
905 #endif /* SUPPORT_COPPER_PHY */
906
907 #if SUPPORT_FIBER_PHY
908 struct tg3_fiber_aneginfo {
909 int state;
910 #define ANEG_STATE_UNKNOWN 0
911 #define ANEG_STATE_AN_ENABLE 1
912 #define ANEG_STATE_RESTART_INIT 2
913 #define ANEG_STATE_RESTART 3
914 #define ANEG_STATE_DISABLE_LINK_OK 4
915 #define ANEG_STATE_ABILITY_DETECT_INIT 5
916 #define ANEG_STATE_ABILITY_DETECT 6
917 #define ANEG_STATE_ACK_DETECT_INIT 7
918 #define ANEG_STATE_ACK_DETECT 8
919 #define ANEG_STATE_COMPLETE_ACK_INIT 9
920 #define ANEG_STATE_COMPLETE_ACK 10
921 #define ANEG_STATE_IDLE_DETECT_INIT 11
922 #define ANEG_STATE_IDLE_DETECT 12
923 #define ANEG_STATE_LINK_OK 13
924 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
925 #define ANEG_STATE_NEXT_PAGE_WAIT 15
926
927 uint32_t flags;
928 #define MR_AN_ENABLE 0x00000001
929 #define MR_RESTART_AN 0x00000002
930 #define MR_AN_COMPLETE 0x00000004
931 #define MR_PAGE_RX 0x00000008
932 #define MR_NP_LOADED 0x00000010
933 #define MR_TOGGLE_TX 0x00000020
934 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
935 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
936 #define MR_LP_ADV_SYM_PAUSE 0x00000100
937 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
938 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
939 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
940 #define MR_LP_ADV_NEXT_PAGE 0x00001000
941 #define MR_TOGGLE_RX 0x00002000
942 #define MR_NP_RX 0x00004000
943
944 #define MR_LINK_OK 0x80000000
945
946 unsigned long link_time, cur_time;
947
948 uint32_t ability_match_cfg;
949 int ability_match_count;
950
951 char ability_match, idle_match, ack_match;
952
953 uint32_t txconfig, rxconfig;
954 #define ANEG_CFG_NP 0x00000080
955 #define ANEG_CFG_ACK 0x00000040
956 #define ANEG_CFG_RF2 0x00000020
957 #define ANEG_CFG_RF1 0x00000010
958 #define ANEG_CFG_PS2 0x00000001
959 #define ANEG_CFG_PS1 0x00008000
960 #define ANEG_CFG_HD 0x00004000
961 #define ANEG_CFG_FD 0x00002000
962 #define ANEG_CFG_INVAL 0x00001f06
963
964 };
965 #define ANEG_OK 0
966 #define ANEG_DONE 1
967 #define ANEG_TIMER_ENAB 2
968 #define ANEG_FAILED -1
969
970 #define ANEG_STATE_SETTLE_TIME 10000
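/* tg3_fiber_aneg_smachine() runs the 1000BASE-X autonegotiation state
 * machine (essentially IEEE 802.3 clause 37) in software: it samples the
 * received config word from MAC_RX_AUTO_NEG, transmits our config word via
 * MAC_TX_AUTO_NEG, and is stepped roughly once per microsecond by the
 * polling loop in tg3_setup_fiber_phy() until it reports ANEG_DONE or
 * ANEG_FAILED.
 */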
971
972 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
973 struct tg3_fiber_aneginfo *ap)
974 {
975 unsigned long delta;
976 uint32_t rx_cfg_reg;
977 int ret;
978
979 if (ap->state == ANEG_STATE_UNKNOWN) {
980 ap->rxconfig = 0;
981 ap->link_time = 0;
982 ap->cur_time = 0;
983 ap->ability_match_cfg = 0;
984 ap->ability_match_count = 0;
985 ap->ability_match = 0;
986 ap->idle_match = 0;
987 ap->ack_match = 0;
988 }
989 ap->cur_time++;
990
991 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
992 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
993
994 if (rx_cfg_reg != ap->ability_match_cfg) {
995 ap->ability_match_cfg = rx_cfg_reg;
996 ap->ability_match = 0;
997 ap->ability_match_count = 0;
998 } else {
999 if (++ap->ability_match_count > 1) {
1000 ap->ability_match = 1;
1001 ap->ability_match_cfg = rx_cfg_reg;
1002 }
1003 }
1004 if (rx_cfg_reg & ANEG_CFG_ACK)
1005 ap->ack_match = 1;
1006 else
1007 ap->ack_match = 0;
1008
1009 ap->idle_match = 0;
1010 } else {
1011 ap->idle_match = 1;
1012 ap->ability_match_cfg = 0;
1013 ap->ability_match_count = 0;
1014 ap->ability_match = 0;
1015 ap->ack_match = 0;
1016
1017 rx_cfg_reg = 0;
1018 }
1019
1020 ap->rxconfig = rx_cfg_reg;
1021 ret = ANEG_OK;
1022
1023 switch(ap->state) {
1024 case ANEG_STATE_UNKNOWN:
1025 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1026 ap->state = ANEG_STATE_AN_ENABLE;
1027
1028 /* fallthru */
1029 case ANEG_STATE_AN_ENABLE:
1030 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1031 if (ap->flags & MR_AN_ENABLE) {
1032 ap->link_time = 0;
1033 ap->cur_time = 0;
1034 ap->ability_match_cfg = 0;
1035 ap->ability_match_count = 0;
1036 ap->ability_match = 0;
1037 ap->idle_match = 0;
1038 ap->ack_match = 0;
1039
1040 ap->state = ANEG_STATE_RESTART_INIT;
1041 } else {
1042 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1043 }
1044 break;
1045
1046 case ANEG_STATE_RESTART_INIT:
1047 ap->link_time = ap->cur_time;
1048 ap->flags &= ~(MR_NP_LOADED);
1049 ap->txconfig = 0;
1050 tw32(MAC_TX_AUTO_NEG, 0);
1051 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1052 tw32_carefully(MAC_MODE, tp->mac_mode);
1053
1054 ret = ANEG_TIMER_ENAB;
1055 ap->state = ANEG_STATE_RESTART;
1056
1057 /* fallthru */
1058 case ANEG_STATE_RESTART:
1059 delta = ap->cur_time - ap->link_time;
1060 if (delta > ANEG_STATE_SETTLE_TIME) {
1061 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1062 } else {
1063 ret = ANEG_TIMER_ENAB;
1064 }
1065 break;
1066
1067 case ANEG_STATE_DISABLE_LINK_OK:
1068 ret = ANEG_DONE;
1069 break;
1070
1071 case ANEG_STATE_ABILITY_DETECT_INIT:
1072 ap->flags &= ~(MR_TOGGLE_TX);
1073 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1074 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1075 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1076 tw32_carefully(MAC_MODE, tp->mac_mode);
1077
1078 ap->state = ANEG_STATE_ABILITY_DETECT;
1079 break;
1080
1081 case ANEG_STATE_ABILITY_DETECT:
1082 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1083 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1084 }
1085 break;
1086
1087 case ANEG_STATE_ACK_DETECT_INIT:
1088 ap->txconfig |= ANEG_CFG_ACK;
1089 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1090 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1091 tw32_carefully(MAC_MODE, tp->mac_mode);
1092
1093 ap->state = ANEG_STATE_ACK_DETECT;
1094
1095 /* fallthru */
1096 case ANEG_STATE_ACK_DETECT:
1097 if (ap->ack_match != 0) {
1098 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1099 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1100 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1101 } else {
1102 ap->state = ANEG_STATE_AN_ENABLE;
1103 }
1104 } else if (ap->ability_match != 0 &&
1105 ap->rxconfig == 0) {
1106 ap->state = ANEG_STATE_AN_ENABLE;
1107 }
1108 break;
1109
1110 case ANEG_STATE_COMPLETE_ACK_INIT:
1111 if (ap->rxconfig & ANEG_CFG_INVAL) {
1112 ret = ANEG_FAILED;
1113 break;
1114 }
1115 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1116 MR_LP_ADV_HALF_DUPLEX |
1117 MR_LP_ADV_SYM_PAUSE |
1118 MR_LP_ADV_ASYM_PAUSE |
1119 MR_LP_ADV_REMOTE_FAULT1 |
1120 MR_LP_ADV_REMOTE_FAULT2 |
1121 MR_LP_ADV_NEXT_PAGE |
1122 MR_TOGGLE_RX |
1123 MR_NP_RX);
1124 if (ap->rxconfig & ANEG_CFG_FD)
1125 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1126 if (ap->rxconfig & ANEG_CFG_HD)
1127 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1128 if (ap->rxconfig & ANEG_CFG_PS1)
1129 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1130 if (ap->rxconfig & ANEG_CFG_PS2)
1131 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1132 if (ap->rxconfig & ANEG_CFG_RF1)
1133 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1134 if (ap->rxconfig & ANEG_CFG_RF2)
1135 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1136 if (ap->rxconfig & ANEG_CFG_NP)
1137 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1138
1139 ap->link_time = ap->cur_time;
1140
1141 ap->flags ^= (MR_TOGGLE_TX);
1142 if (ap->rxconfig & 0x0008)
1143 ap->flags |= MR_TOGGLE_RX;
1144 if (ap->rxconfig & ANEG_CFG_NP)
1145 ap->flags |= MR_NP_RX;
1146 ap->flags |= MR_PAGE_RX;
1147
1148 ap->state = ANEG_STATE_COMPLETE_ACK;
1149 ret = ANEG_TIMER_ENAB;
1150 break;
1151
1152 case ANEG_STATE_COMPLETE_ACK:
1153 if (ap->ability_match != 0 &&
1154 ap->rxconfig == 0) {
1155 ap->state = ANEG_STATE_AN_ENABLE;
1156 break;
1157 }
1158 delta = ap->cur_time - ap->link_time;
1159 if (delta > ANEG_STATE_SETTLE_TIME) {
1160 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1161 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1162 } else {
1163 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1164 !(ap->flags & MR_NP_RX)) {
1165 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1166 } else {
1167 ret = ANEG_FAILED;
1168 }
1169 }
1170 }
1171 break;
1172
1173 case ANEG_STATE_IDLE_DETECT_INIT:
1174 ap->link_time = ap->cur_time;
1175 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1176 tw32_carefully(MAC_MODE, tp->mac_mode);
1177
1178 ap->state = ANEG_STATE_IDLE_DETECT;
1179 ret = ANEG_TIMER_ENAB;
1180 break;
1181
1182 case ANEG_STATE_IDLE_DETECT:
1183 if (ap->ability_match != 0 &&
1184 ap->rxconfig == 0) {
1185 ap->state = ANEG_STATE_AN_ENABLE;
1186 break;
1187 }
1188 delta = ap->cur_time - ap->link_time;
1189 if (delta > ANEG_STATE_SETTLE_TIME) {
1190 /* XXX another gem from the Broadcom driver :( */
1191 ap->state = ANEG_STATE_LINK_OK;
1192 }
1193 break;
1194
1195 case ANEG_STATE_LINK_OK:
1196 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1197 ret = ANEG_DONE;
1198 break;
1199
1200 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1201 /* ??? unimplemented */
1202 break;
1203
1204 case ANEG_STATE_NEXT_PAGE_WAIT:
1205 /* ??? unimplemented */
1206 break;
1207
1208 default:
1209 ret = ANEG_FAILED;
1210 break;
1211 };
1212
1213 return ret;
1214 }
1215
1216 static int tg3_setup_fiber_phy(struct tg3 *tp)
1217 {
1218 uint32_t orig_pause_cfg;
1219 uint16_t orig_active_speed;
1220 uint8_t orig_active_duplex;
1221 int current_link_up;
1222 int i;
1223
1224 orig_pause_cfg =
1225 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1226 TG3_FLAG_TX_PAUSE));
1227 orig_active_speed = tp->link_config.active_speed;
1228 orig_active_duplex = tp->link_config.active_duplex;
1229
1230 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1231 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1232 tw32_carefully(MAC_MODE, tp->mac_mode);
1233
1234 /* Reset when initting first time or we have a link. */
1235 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1236 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1237 /* Set PLL lock range. */
1238 tg3_writephy(tp, 0x16, 0x8007);
1239
1240 /* SW reset */
1241 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1242
1243 /* Wait for reset to complete. */
1244 mdelay(5);
1245
1246 /* Config mode; select PMA/Ch 1 regs. */
1247 tg3_writephy(tp, 0x10, 0x8411);
1248
1249 /* Enable auto-lock and comdet, select txclk for tx. */
1250 tg3_writephy(tp, 0x11, 0x0a10);
1251
1252 tg3_writephy(tp, 0x18, 0x00a0);
1253 tg3_writephy(tp, 0x16, 0x41ff);
1254
1255 /* Assert and deassert POR. */
1256 tg3_writephy(tp, 0x13, 0x0400);
1257 udelay(40);
1258 tg3_writephy(tp, 0x13, 0x0000);
1259
1260 tg3_writephy(tp, 0x11, 0x0a50);
1261 udelay(40);
1262 tg3_writephy(tp, 0x11, 0x0a10);
1263
1264 /* Wait for signal to stabilize */
1265 mdelay(150);
1266
1267 /* Deselect the channel register so we can read the PHYID
1268 * later.
1269 */
1270 tg3_writephy(tp, 0x10, 0x8011);
1271 }
1272
1273 /* Disable link change interrupt. */
1274 tw32_carefully(MAC_EVENT, 0);
1275
1276 current_link_up = 0;
1277 if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
1278 if (!(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
1279 struct tg3_fiber_aneginfo aninfo;
1280 int status = ANEG_FAILED;
1281 unsigned int tick;
1282 uint32_t tmp;
1283
1284 memset(&aninfo, 0, sizeof(aninfo));
1285 aninfo.flags |= (MR_AN_ENABLE);
1286
1287 tw32(MAC_TX_AUTO_NEG, 0);
1288
1289 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1290 tw32_carefully(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1291
1292 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1293
1294 aninfo.state = ANEG_STATE_UNKNOWN;
1295 aninfo.cur_time = 0;
1296 tick = 0;
1297 while (++tick < 195000) {
1298 status = tg3_fiber_aneg_smachine(tp, &aninfo);
1299 if (status == ANEG_DONE ||
1300 status == ANEG_FAILED)
1301 break;
1302
1303 udelay(1);
1304 }
1305
1306 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1307 tw32_carefully(MAC_MODE, tp->mac_mode);
1308
1309 if (status == ANEG_DONE &&
1310 (aninfo.flags &
1311 (MR_AN_COMPLETE | MR_LINK_OK |
1312 MR_LP_ADV_FULL_DUPLEX))) {
1313 uint32_t local_adv, remote_adv;
1314
1315 local_adv = ADVERTISE_PAUSE_CAP;
1316 remote_adv = 0;
1317 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
1318 remote_adv |= LPA_PAUSE_CAP;
1319 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
1320 remote_adv |= LPA_PAUSE_ASYM;
1321
1322 tg3_setup_flow_control(tp, local_adv, remote_adv);
1323
1324 tp->tg3_flags |=
1325 TG3_FLAG_GOT_SERDES_FLOWCTL;
1326 current_link_up = 1;
1327 }
1328 for (i = 0; i < 60; i++) {
1329 udelay(20);
1330 tw32_carefully(MAC_STATUS,
1331 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
1332 if ((tr32(MAC_STATUS) &
1333 (MAC_STATUS_SYNC_CHANGED |
1334 MAC_STATUS_CFG_CHANGED)) == 0)
1335 break;
1336 }
1337 if (current_link_up == 0 &&
1338 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1339 current_link_up = 1;
1340 }
1341 } else {
1342 /* Forcing 1000FD link up. */
1343 current_link_up = 1;
1344 }
1345 }
1346
1347 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1348 tw32_carefully(MAC_MODE, tp->mac_mode);
1349
1350 tp->hw_status->status =
1351 (SD_STATUS_UPDATED |
1352 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
1353
1354 for (i = 0; i < 100; i++) {
1355 udelay(20);
1356 tw32_carefully(MAC_STATUS,
1357 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
1358 if ((tr32(MAC_STATUS) &
1359 (MAC_STATUS_SYNC_CHANGED |
1360 MAC_STATUS_CFG_CHANGED)) == 0)
1361 break;
1362 }
1363
1364 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
1365 current_link_up = 0;
1366
1367 if (current_link_up == 1) {
1368 tp->link_config.active_speed = SPEED_1000;
1369 tp->link_config.active_duplex = DUPLEX_FULL;
1370 } else {
1371 tp->link_config.active_speed = SPEED_INVALID;
1372 tp->link_config.active_duplex = DUPLEX_INVALID;
1373 }
1374
1375 if (current_link_up != tp->carrier_ok) {
1376 tp->carrier_ok = current_link_up;
1377 tg3_link_report(tp);
1378 } else {
1379 uint32_t now_pause_cfg =
1380 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1381 TG3_FLAG_TX_PAUSE);
1382 if (orig_pause_cfg != now_pause_cfg ||
1383 orig_active_speed != tp->link_config.active_speed ||
1384 orig_active_duplex != tp->link_config.active_duplex)
1385 tg3_link_report(tp);
1386 }
1387
1388 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
1389 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
1390 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
1391 tw32_carefully(MAC_MODE, tp->mac_mode);
1392 }
1393 }
1394
1395 return 0;
1396 }
1397 #else
1398 #define tg3_setup_fiber_phy(TP) (-EINVAL)
1399 #endif /* SUPPORT_FIBER_PHY */
1400
1401 static int tg3_setup_phy(struct tg3 *tp)
1402 {
1403 int err;
1404
1405 if (tp->phy_id == PHY_ID_SERDES) {
1406 err = tg3_setup_fiber_phy(tp);
1407 } else {
1408 err = tg3_setup_copper_phy(tp);
1409 }
1410
1411 if (tp->link_config.active_speed == SPEED_1000 &&
1412 tp->link_config.active_duplex == DUPLEX_HALF)
1413 tw32(MAC_TX_LENGTHS,
1414 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1415 (6 << TX_LENGTHS_IPG_SHIFT) |
1416 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1417 else
1418 tw32(MAC_TX_LENGTHS,
1419 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1420 (6 << TX_LENGTHS_IPG_SHIFT) |
1421 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1422
1423 return err;
1424 }
1425
1426
1427 #define MAX_WAIT_CNT 1000
1428
1429 /* To stop a block, clear the enable bit and poll till it
1430 * clears.
1431 */
1432 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, uint32_t enable_bit)
1433 {
1434 unsigned int i;
1435 uint32_t val;
1436
1437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
1439 switch(ofs) {
1440 case RCVLSC_MODE:
1441 case DMAC_MODE:
1442 case MBFREE_MODE:
1443 case BUFMGR_MODE:
1444 case MEMARB_MODE:
1445 /* We can't enable/disable these bits of the
1446 * 5705 or 5787, just say success.
1447 */
1448 return 0;
1449 default:
1450 break;
1451 }
1452 }
1453 val = tr32(ofs);
1454 val &= ~enable_bit;
1455 tw32(ofs, val);
1456 tr32(ofs);
1457
1458 for (i = 0; i < MAX_WAIT_CNT; i++) {
1459 udelay(100);
1460 val = tr32(ofs);
1461 if ((val & enable_bit) == 0)
1462 break;
1463 }
1464
1465 if (i == MAX_WAIT_CNT) {
1466 printf( "tg3_stop_block timed out, ofs=%#lx enable_bit=%3x\n",
1467 ofs, enable_bit );
1468 return -ENODEV;
1469 }
1470
1471 return 0;
1472 }
1473
1474 static int tg3_abort_hw(struct tg3 *tp)
1475 {
1476 int i, err;
1477 uint32_t val;
1478
1479 tg3_disable_ints(tp);
1480
1481 tp->rx_mode &= ~RX_MODE_ENABLE;
1482 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
1483
1484 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
1485 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
1486 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
1487 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
1488 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
1489 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
1490
1491 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
1492 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
1493 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1494 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
1495 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
1496 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
1497 if (err)
1498 goto out;
1499
1500 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
1501 tw32_carefully(MAC_MODE, tp->mac_mode);
1502
1503 tp->tx_mode &= ~TX_MODE_ENABLE;
1504 tw32_carefully(MAC_TX_MODE, tp->tx_mode);
1505
1506 for (i = 0; i < MAX_WAIT_CNT; i++) {
1507 udelay(100);
1508 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
1509 break;
1510 }
1511 if (i >= MAX_WAIT_CNT) {
1512 printf("tg3_abort_hw timed out TX_MODE_ENABLE will not clear MAC_TX_MODE=%x\n",
1513 (unsigned int) tr32(MAC_TX_MODE));
1514 return -ENODEV;
1515 }
1516
1517 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
1518 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
1519 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
1520
1521 val = tr32(FTQ_RESET);
1522 val |= FTQ_RESET_DMA_READ_QUEUE | FTQ_RESET_DMA_HIGH_PRI_READ |
1523 FTQ_RESET_SEND_BD_COMPLETION | FTQ_RESET_DMA_WRITE |
1524 FTQ_RESET_DMA_HIGH_PRI_WRITE | FTQ_RESET_SEND_DATA_COMPLETION |
1525 FTQ_RESET_HOST_COALESCING | FTQ_RESET_MAC_TX |
1526 FTQ_RESET_RX_BD_COMPLETE | FTQ_RESET_RX_LIST_PLCMT |
1527 FTQ_RESET_RX_DATA_COMPLETION;
1528 tw32(FTQ_RESET, val);
1529
1530 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
1531 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
1532 if (err)
1533 goto out;
1534
1535 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
1536
1537 out:
1538 return err;
1539 }
1540
1541 static void tg3_chip_reset(struct tg3 *tp)
1542 {
1543 uint32_t val;
1544
1545 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
1546 /* Force NVRAM to settle.
1547 * This deals with a chip bug which can result in EEPROM
1548 * corruption.
1549 */
1550 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
1551 int i;
1552
1553 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1554 for (i = 0; i < 100000; i++) {
1555 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
1556 break;
1557 udelay(10);
1558 }
1559 }
1560 }
1561 /* In Etherboot we don't need to worry about the 5701
1562 * REG_WRITE_BUG because we do all register writes indirectly.
1563 */
1564
1565 // Alf: here patched
1566 /* do the reset */
1567 val = GRC_MISC_CFG_CORECLK_RESET;
1568 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
1569 if (tr32(0x7e2c) == 0x60) {
1570 tw32(0x7e2c, 0x20);
1571 }
1572 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
1573 tw32(GRC_MISC_CFG, (1 << 29));
1574 val |= (1 << 29);
1575 }
1576 }
1577
1578 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
1579 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1580 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) {
1581 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
1582 }
1583
1584 // Alf : Please VALIDATE THIS.
1585 // It is necessary in my case (5751) to prevent a reboot, but
1586 // I have no idea about a side effect on any other version.
1587 // It appears to be what's done in tigon3.c from Broadcom
1588 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
1589 tw32(GRC_MISC_CFG, 0x20000000) ;
1590 val |= 0x20000000 ;
1591 }
1592
1593 tw32(GRC_MISC_CFG, val);
1594
1595 /* Flush PCI posted writes. The normal MMIO registers
1596 * are inaccessible at this time so this is the only
1597 * way to do this reliably. I tried to use indirect
1598 * register read/write but this upset some 5701 variants.
1599 */
1600 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
1601
1602 udelay(120);
1603
1604 /* Re-enable indirect register accesses. */
1605 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
1606 tp->misc_host_ctrl);
1607
1608 /* Set MAX PCI retry to zero. */
1609 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
1610 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1611 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
1612 val |= PCISTATE_RETRY_SAME_DMA;
1613 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
1614
1615 pci_restore_state(tp->pdev, tp->pci_cfg_state);
1616
1617 /* Make sure PCI-X relaxed ordering bit is clear. */
1618 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
1619 val &= ~PCIX_CAPS_RELAXED_ORDERING;
1620 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
1621
1622 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
1623
1624 if (((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0) &&
1625 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
1626 tp->pci_clock_ctrl |=
1627 (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE);
1628 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1629 }
1630
1631 tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
1632 }
1633
1634 static void tg3_stop_fw(struct tg3 *tp)
1635 {
1636 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
1637 uint32_t val;
1638 int i;
1639
1640 tg3_write_mem(NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1641 val = tr32(GRC_RX_CPU_EVENT);
1642 val |= (1 << 14);
1643 tw32(GRC_RX_CPU_EVENT, val);
1644
1645 /* Wait for RX cpu to ACK the event. */
1646 for (i = 0; i < 100; i++) {
1647 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
1648 break;
1649 udelay(1);
1650 }
1651 }
1652 }
1653
1654 static int tg3_restart_fw(struct tg3 *tp, uint32_t state)
1655 {
1656 uint32_t val;
1657 int i;
1658
1659 tg3_write_mem(NIC_SRAM_FIRMWARE_MBOX,
1660 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1661 /* Wait for firmware initialization to complete. */
1662 for (i = 0; i < 100000; i++) {
1663 tg3_read_mem(NIC_SRAM_FIRMWARE_MBOX, &val);
1664 if (val == (uint32_t) ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1665 break;
1666 udelay(10);
1667 }
1668 if (i >= 100000 &&
1669 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
1670 !(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) {
1671 printf ( "Firmware will not restart magic=%#x\n",
1672 val );
1673 return -ENODEV;
1674 }
1675 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1676 state = DRV_STATE_SUSPEND;
1677 }
1678
1679 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
1680 (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)) {
1681 // Enable PCIE bug fix
1682 tg3_read_mem(0x7c00, &val);
1683 tg3_write_mem(0x7c00, val | 0x02000000);
1684 }
1685 tg3_write_mem(NIC_SRAM_FW_DRV_STATE_MBOX, state);
1686 return 0;
1687 }
1688
1689 static int tg3_halt(struct tg3 *tp)
1690 {
1691 tg3_stop_fw(tp);
1692 tg3_abort_hw(tp);
1693 tg3_chip_reset(tp);
1694 return tg3_restart_fw(tp, DRV_STATE_UNLOAD);
1695 }
1696
1697 static void __tg3_set_mac_addr(struct tg3 *tp)
1698 {
1699 uint32_t addr_high, addr_low;
1700 int i;
1701
1702 addr_high = ((tp->nic->node_addr[0] << 8) |
1703 tp->nic->node_addr[1]);
1704 addr_low = ((tp->nic->node_addr[2] << 24) |
1705 (tp->nic->node_addr[3] << 16) |
1706 (tp->nic->node_addr[4] << 8) |
1707 (tp->nic->node_addr[5] << 0));
1708 for (i = 0; i < 4; i++) {
1709 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1710 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
1711 }
1712
1713 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
1714 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
1715 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)) {
1716 for(i = 0; i < 12; i++) {
1717 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1718 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
1719 }
1720 }
1721 addr_high = (tp->nic->node_addr[0] +
1722 tp->nic->node_addr[1] +
1723 tp->nic->node_addr[2] +
1724 tp->nic->node_addr[3] +
1725 tp->nic->node_addr[4] +
1726 tp->nic->node_addr[5]) &
1727 TX_BACKOFF_SEED_MASK;
1728 tw32(MAC_TX_BACKOFF_SEED, addr_high);
1729 }
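/* The station address is copied into all four MAC_ADDR perfect-match slots
 * (and, on chips that have them, the twelve extended slots), and the byte
 * sum of the address seeds the transmit backoff generator.
 */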
1730
1731 static void tg3_set_bdinfo(struct tg3 *tp, uint32_t bdinfo_addr,
1732 dma_addr_t mapping, uint32_t maxlen_flags,
1733 uint32_t nic_addr)
1734 {
1735 tg3_write_mem((bdinfo_addr +
1736 TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1737 ((uint64_t) mapping >> 32));
1738 tg3_write_mem((bdinfo_addr +
1739 TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1740 ((uint64_t) mapping & 0xffffffff));
1741 tg3_write_mem((bdinfo_addr +
1742 TG3_BDINFO_MAXLEN_FLAGS),
1743 maxlen_flags);
1744 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1745 tg3_write_mem((bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr);
1746 }
1747 }
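/* A TG3_BDINFO block in NIC SRAM describes one ring to the hardware:
 * a 64-bit host DMA address, a maxlen/flags word, and the ring's location in
 * NIC-local memory; see the larger comment in tg3_setup_hw() for how the
 * individual receive rings are laid out.
 */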
1748
1749
1750 static void tg3_init_rings(struct tg3 *tp)
1751 {
1752 unsigned i;
1753
1754 /* Zero out the tg3 variables */
1755 memset(&tg3_bss, 0, sizeof(tg3_bss));
1756 tp->rx_std = &tg3_bss.rx_std[0];
1757 tp->rx_rcb = &tg3_bss.rx_rcb[0];
1758 tp->tx_ring = &tg3_bss.tx_ring[0];
1759 tp->hw_status = &tg3_bss.hw_status;
1760 tp->hw_stats = &tg3_bss.hw_stats;
1761 tp->mac_mode = 0;
1762
1763
1764 /* Initialize tx/rx rings for packet processing.
1765 *
1766 * The chip has been shut down and the driver detached from
1767 * the networking, so no interrupts or new tx packets will
1768 * end up in the driver.
1769 */
1770
1771 /* Initialize invariants of the rings, we only set this
1772 * stuff once. This works because the card does not
1773 * write into the rx buffer posting rings.
1774 */
1775 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
1776 struct tg3_rx_buffer_desc *rxd;
1777
1778 rxd = &tp->rx_std[i];
1779 rxd->idx_len = (RX_PKT_BUF_SZ - 2 - 64) << RXD_LEN_SHIFT;
1780 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
1781 rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT));
1782
1783 /* Note where the receive buffer for the ring is placed */
1784 rxd->addr_hi = 0;
1785 rxd->addr_lo = virt_to_bus(
1786 &tg3_bss.rx_bufs[i%TG3_DEF_RX_RING_PENDING][2]);
1787 }
1788 }
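/* Only TG3_DEF_RX_RING_PENDING real buffers back the 512-entry standard
 * ring; descriptors share them modulo that count, which is presumably fine
 * for a single-threaded boot loader that consumes each frame before the
 * buffer is reused.  The +2 offset keeps the IP header 4-byte aligned.
 */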
1789
1790 #define TG3_WRITE_SETTINGS(TABLE) \
1791 do { \
1792 const uint32_t *_table, *_end; \
1793 _table = TABLE; \
1794 _end = _table + sizeof(TABLE)/sizeof(TABLE[0]); \
1795 for(; _table < _end; _table += 2) { \
1796 tw32(_table[0], _table[1]); \
1797 } \
1798 } while(0)
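/* TG3_WRITE_SETTINGS() walks a flat array of (register, value) pairs and
 * issues one tw32() per pair.  A minimal sketch of its use:
 *
 *   static const uint32_t table[] = {
 *           RCVBDI_STD_THRESH, TG3_DEF_RX_RING_PENDING / 8,
 *   };
 *   TG3_WRITE_SETTINGS(table);
 *
 * The real tables used below are table_all and table_not_5705 in
 * tg3_setup_hw().
 */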
1799
1800
1801 /* initialize/reset the tg3 */
1802 static int tg3_setup_hw(struct tg3 *tp)
1803 {
1804 uint32_t val, rdmac_mode;
1805 int i, err, limit;
1806
1807 /* Simply don't support setups with extremely buggy firmware in Etherboot */
1808 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
1809 printf("Error 5701_A0 firmware bug detected\n");
1810 return -EINVAL;
1811 }
1812
1813 tg3_disable_ints(tp);
1814
1815 /* Originally this was all in tg3_init_hw */
1816
1817 /* Force the chip into D0. */
1818 tg3_set_power_state_0(tp);
1819
1820 tg3_switch_clocks(tp);
1821
1822 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
1823
1824 // This should go somewhere else
1825 #define T3_PCIE_CAPABILITY_ID_REG 0xD0
1826 #define T3_PCIE_CAPABILITY_ID 0x10
1827 #define T3_PCIE_CAPABILITY_REG 0xD2
1828
1829 /* Originally this was all in tg3_reset_hw */
1830
1831 tg3_stop_fw(tp);
1832
1833 /* No need to call tg3_abort_hw here, it is called before tg3_setup_hw. */
1834
1835 tg3_chip_reset(tp);
1836
1837 tw32(GRC_MODE, tp->grc_mode); /* Redundant? */
1838
1839 err = tg3_restart_fw(tp, DRV_STATE_START);
1840 if (err)
1841 return err;
1842
1843 if (tp->phy_id == PHY_ID_SERDES) {
1844 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
1845 }
1846 tw32_carefully(MAC_MODE, tp->mac_mode);
1847
1848
1849 /* This works around an issue with Athlon chipsets on
1850 * B3 tigon3 silicon. This bit has no effect on any
1851 * other revision.
1852 * Alf: Except 5750 ! (which reboots)
1853 */
1854
1855 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
1856 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1857 tw32_carefully(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1858
1859 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1860 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
1861 val = tr32(TG3PCI_PCISTATE);
1862 val |= PCISTATE_RETRY_SAME_DMA;
1863 tw32(TG3PCI_PCISTATE, val);
1864 }
1865
1866 /* Descriptor ring init may make accesses to the
1867 * NIC SRAM area to setup the TX descriptors, so we
1868 * can only do this after the hardware has been
1869 * successfully reset.
1870 */
1871 tg3_init_rings(tp);
1872
1873 /* Clear statistics/status block in chip */
1874 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1875 for (i = NIC_SRAM_STATS_BLK;
1876 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
1877 i += sizeof(uint32_t)) {
1878 tg3_write_mem(i, 0);
1879 udelay(40);
1880 }
1881 }
1882
1883 /* This value is determined during the probe time DMA
1884 * engine test, tg3_setup_dma.
1885 */
1886 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
1887
1888 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
1889 GRC_MODE_4X_NIC_SEND_RINGS |
1890 GRC_MODE_NO_TX_PHDR_CSUM |
1891 GRC_MODE_NO_RX_PHDR_CSUM);
1892 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
1893 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1894 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
1895
1896 tw32(GRC_MODE,
1897 tp->grc_mode |
1898 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
1899
1900 /* Set up the timer prescaler register. Clock is always 66 MHz. */
1901 tw32(GRC_MISC_CFG,
1902 (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
1903
1904 /* Initialize MBUF/DESC pool. */
1905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
1906 /* Do nothing. */
1907 } else if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
1908 (tp->pci_chip_rev_id != CHIPREV_ID_5721)) {
1909 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
1910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
1911 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
1912 else
1913 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
1914 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
1915 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
1916 }
1917 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
1918 tw32(BUFMGR_MB_RDMA_LOW_WATER,
1919 tp->bufmgr_config.mbuf_read_dma_low_water);
1920 tw32(BUFMGR_MB_MACRX_LOW_WATER,
1921 tp->bufmgr_config.mbuf_mac_rx_low_water);
1922 tw32(BUFMGR_MB_HIGH_WATER,
1923 tp->bufmgr_config.mbuf_high_water);
1924 } else {
1925 tw32(BUFMGR_MB_RDMA_LOW_WATER,
1926 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
1927 tw32(BUFMGR_MB_MACRX_LOW_WATER,
1928 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
1929 tw32(BUFMGR_MB_HIGH_WATER,
1930 tp->bufmgr_config.mbuf_high_water_jumbo);
1931 }
1932 tw32(BUFMGR_DMA_LOW_WATER,
1933 tp->bufmgr_config.dma_low_water);
1934 tw32(BUFMGR_DMA_HIGH_WATER,
1935 tp->bufmgr_config.dma_high_water);
1936
1937 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
1938 for (i = 0; i < 2000; i++) {
1939 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
1940 break;
1941 udelay(10);
1942 }
1943 if (i >= 2000) {
1944 printf("tg3_setup_hw cannot enable BUFMGR\n");
1945 return -ENODEV;
1946 }
1947
1948 tw32(FTQ_RESET, 0xffffffff);
1949 tw32(FTQ_RESET, 0x00000000);
1950 for (i = 0; i < 2000; i++) {
1951 if (tr32(FTQ_RESET) == 0x00000000)
1952 break;
1953 udelay(10);
1954 }
1955 if (i >= 2000) {
1956 printf("tg3_setup_hw cannot reset FTQ\n");
1957 return -ENODEV;
1958 }
1959
1960 /* Initialize TG3_BDINFO's at:
1961 * RCVDBDI_STD_BD: standard eth size rx ring
1962 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
1963 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
1964 *
1965 * like so:
1966 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
1967 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
1968 * ring attribute flags
1969 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
1970 *
1971 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
1972 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
1973 *
1974 * ??? No space allocated for mini receive ring? :(
1975 *
1976 * The size of each ring is fixed in the firmware, but the location is
1977 * configurable.
1978 */
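	/* The tables below are flat arrays of { register offset, value } pairs;
	 * TG3_WRITE_SETTINGS() appears to walk such a table and issue one
	 * register write per pair (inferred from how the tables are laid out
	 * and used here).
	 */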
1979 {
1980 static const uint32_t table_all[] = {
1981 /* Setup replenish thresholds. */
1982 RCVBDI_STD_THRESH, TG3_DEF_RX_RING_PENDING / 8,
1983
1984 /* Etherboot lives below 4GB */
1985 RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
1986 RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC,
1987 };
1988 static const uint32_t table_not_5705[] = {
1989 /* Buffer maximum length */
1990 RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
1991
1992 /* Disable the mini frame rx ring */
1993 RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED,
1994
1995 /* Disable the jumbo frame rx ring */
1996 RCVBDI_JUMBO_THRESH, 0,
1997 RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED,
1998
1999
2000 };
2001 TG3_WRITE_SETTINGS(table_all);
2002 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
2003 virt_to_bus(tp->rx_std));
2004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
2005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
2006 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
2007 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
2008 } else {
2009 TG3_WRITE_SETTINGS(table_not_5705);
2010 }
2011 }
2012
2013
2014 /* There is only one send ring on 5705 and 5787, no need to explicitly
2015 * disable the others.
2016 */
2017 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2018 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) {
2019 /* Clear out send RCB ring in SRAM. */
2020 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
2021 tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED);
2022 }
2023
2024 tp->tx_prod = 0;
2025 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
2026 tw32_mailbox2(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
2027
2028 tg3_set_bdinfo(tp,
2029 NIC_SRAM_SEND_RCB,
2030 virt_to_bus(tp->tx_ring),
2031 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
2032 NIC_SRAM_TX_BUFFER_DESC);
2033
2034 /* There is only one receive return ring on 5705 and 5787, no need to
2035 * explicitly disable the others.
2036 */
2037 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2038 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) {
2039 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; i += TG3_BDINFO_SIZE) {
2040 tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS,
2041 BDINFO_FLAGS_DISABLED);
2042 }
2043 }
2044
2045 tp->rx_rcb_ptr = 0;
2046 tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
2047
2048 tg3_set_bdinfo(tp,
2049 NIC_SRAM_RCV_RET_RCB,
2050 virt_to_bus(tp->rx_rcb),
2051 (TG3_RX_RCB_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
2052 0);
2053
2054 tp->rx_std_ptr = TG3_DEF_RX_RING_PENDING;
2055 tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2056 tp->rx_std_ptr);
2057
2058 tw32_mailbox2(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 0);
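	/* The jumbo producer index is simply left at zero: etherboot never
	 * enables jumbo frames, and on chips that have a jumbo BD ring it was
	 * disabled in the tables above.
	 */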
2059
2060 /* Initialize MAC address and backoff seed. */
2061 __tg3_set_mac_addr(tp);
2062
2063 /* Calculate RDMAC_MODE setting early, we need it to determine
2064 * the RCVLPC_STATE_ENABLE mask.
2065 */
2066 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2067 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2068 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2069 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2070 RDMAC_MODE_LNGREAD_ENAB);
2071 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2072 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
2073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2074 if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2075 if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2076 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2077 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2078 }
2079 }
2080 }
2081
2082 /* Setup host coalescing engine. */
2083 tw32(HOSTCC_MODE, 0);
2084 for (i = 0; i < 2000; i++) {
2085 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2086 break;
2087 udelay(10);
2088 }
2089
2090 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2091 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2092 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2093
2094 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
2095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
2096 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2097 GRC_LCLCTRL_GPIO_OUTPUT1);
2098 tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2099
2100 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
2101 tr32(MAILBOX_INTERRUPT_0);
2102
2103 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2104 tw32_carefully(DMAC_MODE, DMAC_MODE_ENABLE);
2105 }
2106
2107 val = ( WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2108 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2109 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2110 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2111 WDMAC_MODE_LNGREAD_ENAB);
2112 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
2113 ((tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0) &&
2114 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2115 val |= WDMAC_MODE_RX_ACCEL;
2116 }
2117
2118 /* Host coalescing bug fix */
2119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
2120 val |= (1 << 29);
2121
2122 tw32_carefully(WDMAC_MODE, val);
2123
2124 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
2125 val = tr32(TG3PCI_X_CAPS);
2126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2127 val &= PCIX_CAPS_BURST_MASK;
2128 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2129 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2130 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
2131 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2132 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2133 val |= (tp->split_mode_max_reqs <<
2134 PCIX_CAPS_SPLIT_SHIFT);
2135 }
2136 tw32(TG3PCI_X_CAPS, val);
2137 }
2138
2139 tw32_carefully(RDMAC_MODE, rdmac_mode);
2140 {
2141 static const uint32_t table_all[] = {
2142 /* MTU + ethernet header + FCS + optional VLAN tag */
2143 MAC_RX_MTU_SIZE, ETH_MAX_MTU + ETH_HLEN + 8,
2144
2145 /* The slot time is changed by tg3_setup_phy if we
2146 * run at gigabit with half duplex.
2147 */
2148 MAC_TX_LENGTHS,
2149 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2150 (6 << TX_LENGTHS_IPG_SHIFT) |
2151 (32 << TX_LENGTHS_SLOT_TIME_SHIFT),
2152
2153 /* Receive rules. */
2154 MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS,
2155 RCVLPC_CONFIG, 0x0181,
2156
2157 /* Receive/send statistics. */
2158 RCVLPC_STATS_ENABLE, 0xffffff,
2159 RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE,
2160 SNDDATAI_STATSENAB, 0xffffff,
2161 SNDDATAI_STATSCTRL, (SNDDATAI_SCTRL_ENABLE |SNDDATAI_SCTRL_FASTUPD),
2162
2163 /* Host coalescing engine */
2164 HOSTCC_RXCOL_TICKS, 0,
2165 HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS,
2166 HOSTCC_RXMAX_FRAMES, 1,
2167 HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES,
2168 HOSTCC_RXCOAL_MAXF_INT, 1,
2169 HOSTCC_TXCOAL_MAXF_INT, 0,
2170
2171 /* Status/statistics block address. */
2172 /* Etherboot lives below 4GB, so HIGH == 0 */
2173 HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2174
2175 /* No need to enable 32byte coalesce mode. */
2176 HOSTCC_MODE, HOSTCC_MODE_ENABLE | 0,
2177
2178 RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE,
2179 RCVLPC_MODE, RCVLPC_MODE_ENABLE,
2180
2181 RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE,
2182
2183 SNDDATAC_MODE, SNDDATAC_MODE_ENABLE,
2184 SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE,
2185 RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB,
2186 RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ,
2187 SNDDATAI_MODE, SNDDATAI_MODE_ENABLE,
2188 SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE,
2189 SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE,
2190
2191 /* Accept all multicast frames. */
2192 MAC_HASH_REG_0, 0xffffffff,
2193 MAC_HASH_REG_1, 0xffffffff,
2194 MAC_HASH_REG_2, 0xffffffff,
2195 MAC_HASH_REG_3, 0xffffffff,
2196 };
2197 static const uint32_t table_not_5705[] = {
2198 /* Host coalescing engine */
2199 HOSTCC_RXCOAL_TICK_INT, 0,
2200 HOSTCC_TXCOAL_TICK_INT, 0,
2201
2202 /* Status/statistics block address. */
2203 /* Etherboot lives below 4GB, so HIGH == 0 */
2204 HOSTCC_STAT_COAL_TICKS, DEFAULT_STAT_COAL_TICKS,
2205 HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2206 HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK,
2207 HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK,
2208
2209 RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE,
2210
2211 MBFREE_MODE, MBFREE_MODE_ENABLE,
2212 };
2213 TG3_WRITE_SETTINGS(table_all);
2214 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2215 virt_to_bus(tp->hw_stats));
2216 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2217 virt_to_bus(tp->hw_status));
2218 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2219 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) {
2220 TG3_WRITE_SETTINGS(table_not_5705);
2221 }
2222 }
2223
2224 tp->tx_mode = TX_MODE_ENABLE;
2225 tw32_carefully(MAC_TX_MODE, tp->tx_mode);
2226
2227 tp->rx_mode = RX_MODE_ENABLE;
2228 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2229
2230 tp->mi_mode = MAC_MI_MODE_BASE;
2231 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
2232
2233 tw32(MAC_LED_CTRL, 0);
2234 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2235 if (tp->phy_id == PHY_ID_SERDES) {
2236 tw32_carefully(MAC_RX_MODE, RX_MODE_RESET);
2237 }
2238 tp->rx_mode |= RX_MODE_KEEP_VLAN_TAG; /* drop tagged vlan packets */
2239 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2240
2241 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2242 tw32(MAC_SERDES_CFG, 0x616000);
2243
2244 /* Prevent chip from dropping frames when flow control
2245 * is enabled.
2246 */
2247 tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
2248 tr32(MAC_LOW_WMARK_MAX_RX_FRAME);
2249
2250 err = tg3_setup_phy(tp);
2251
2252 /* Ignore CRC stats */
2253
2254 /* Initialize receive rules. */
2255 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
2256 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2257 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
2258 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2259
2260 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
2261 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750))
2262 limit = 8;
2263 else
2264 limit = 16;
2265 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
2266 limit -= 4;
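	/* The cases below fall through deliberately: starting at 'limit', they
	 * clear the unused receive rule/value pairs from rule (limit - 1) down
	 * to rule 4, leaving the low-numbered rules for the defaults programmed
	 * above.
	 */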
2267 switch (limit) {
2268 case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
2269 case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
2270 case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
2271 case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
2272 case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
2273 case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
2274 case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
2275 case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
2276 case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
2277 case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
2278 case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
2279 case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
2280 case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
2281 case 3: /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
2282 case 2:
2283 case 1:
2284 default:
2285 break;
2286 };
2287
2288 return err;
2289 }
2290
2291
2292
2293 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
2294 static void tg3_nvram_init(struct tg3 *tp)
2295 {
2296 tw32(GRC_EEPROM_ADDR,
2297 (EEPROM_ADDR_FSM_RESET |
2298 (EEPROM_DEFAULT_CLOCK_PERIOD <<
2299 EEPROM_ADDR_CLKPERD_SHIFT)));
2300
2301 mdelay(1);
2302
2303 /* Enable seeprom accesses. */
2304 tw32_carefully(GRC_LOCAL_CTRL,
2305 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
2306
2307 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2308 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2309 uint32_t nvcfg1 = tr32(NVRAM_CFG1);
2310
2311 tp->tg3_flags |= TG3_FLAG_NVRAM;
2312 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
2313 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
2314 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
2315 } else {
2316 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
2317 tw32(NVRAM_CFG1, nvcfg1);
2318 }
2319
2320 } else {
2321 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
2322 }
2323 }
2324
2325
2326 static int tg3_nvram_read_using_eeprom(
2327 struct tg3 *tp __unused, uint32_t offset, uint32_t *val)
2328 {
2329 uint32_t tmp;
2330 int i;
2331
2332 if (offset > EEPROM_ADDR_ADDR_MASK ||
2333 (offset % 4) != 0) {
2334 return -EINVAL;
2335 }
2336
2337 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2338 EEPROM_ADDR_DEVID_MASK |
2339 EEPROM_ADDR_READ);
2340 tw32(GRC_EEPROM_ADDR,
2341 tmp |
2342 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2343 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2344 EEPROM_ADDR_ADDR_MASK) |
2345 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2346
2347 for (i = 0; i < 10000; i++) {
2348 tmp = tr32(GRC_EEPROM_ADDR);
2349
2350 if (tmp & EEPROM_ADDR_COMPLETE)
2351 break;
2352 udelay(100);
2353 }
2354 if (!(tmp & EEPROM_ADDR_COMPLETE)) {
2355 return -EBUSY;
2356 }
2357
2358 *val = tr32(GRC_EEPROM_DATA);
2359 return 0;
2360 }
2361
2362 static int tg3_nvram_read(struct tg3 *tp, uint32_t offset, uint32_t *val)
2363 {
2364 int i, saw_done_clear;
2365
2366 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2367 return tg3_nvram_read_using_eeprom(tp, offset, val);
2368
2369 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
2370 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
2371 NVRAM_BUFFERED_PAGE_POS) +
2372 (offset % NVRAM_BUFFERED_PAGE_SIZE);
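	/* Worked example (assuming the usual values NVRAM_BUFFERED_PAGE_SIZE
	 * == 264 and NVRAM_BUFFERED_PAGE_POS == 9, which are not shown in this
	 * file): a linear offset of 300 becomes
	 * ((300 / 264) << 9) + (300 % 264) = 512 + 36 = 548,
	 * i.e. page 1, byte 36.
	 */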
2373
2374 if (offset > NVRAM_ADDR_MSK)
2375 return -EINVAL;
2376
2377 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2378 for (i = 0; i < 1000; i++) {
2379 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2380 break;
2381 udelay(20);
2382 }
2383
2384 tw32(NVRAM_ADDR, offset);
2385 tw32(NVRAM_CMD,
2386 NVRAM_CMD_RD | NVRAM_CMD_GO |
2387 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2388
2389 /* Wait for done bit to clear then set again. */
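	/* Presumably DONE may still read as set right after the command is
	 * issued, so only a clear-then-set transition indicates that this
	 * particular read has completed (inferred from the loop below).
	 */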
2390 saw_done_clear = 0;
2391 for (i = 0; i < 1000; i++) {
2392 udelay(10);
2393 if (!saw_done_clear &&
2394 !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2395 saw_done_clear = 1;
2396 else if (saw_done_clear &&
2397 (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2398 break;
2399 }
2400 if (i >= 1000) {
2401 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2402 return -EBUSY;
2403 }
2404
2405 *val = bswap_32(tr32(NVRAM_RDDATA));
2406 tw32(NVRAM_SWARB, 0x20);
2407
2408 return 0;
2409 }
2410
2411 struct subsys_tbl_ent {
2412 uint16_t subsys_vendor, subsys_devid;
2413 uint32_t phy_id;
2414 };
2415
2416 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
2417 /* Broadcom boards. */
2418 { 0x14e4, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
2419 { 0x14e4, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
2420 { 0x14e4, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
2421 { 0x14e4, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
2422 { 0x14e4, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
2423 { 0x14e4, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
2424 { 0x14e4, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
2425 { 0x14e4, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
2426 { 0x14e4, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
2427 { 0x14e4, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
2428 { 0x14e4, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */
2429
2430 /* 3com boards. */
2431 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
2432 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
2433 /* { PCI_VENDOR_ID_3COM, 0x1002, PHY_ID_XXX }, 3C996CT */
2434 /* { PCI_VENDOR_ID_3COM, 0x1003, PHY_ID_XXX }, 3C997T */
2435 { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
2436 /* { PCI_VENDOR_ID_3COM, 0x1005, PHY_ID_XXX }, 3C997SZ */
2437 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
2438 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
2439
2440 /* DELL boards. */
2441 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
2442 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
2443 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
2444 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
2445 { PCI_VENDOR_ID_DELL, 0x0179, PHY_ID_BCM5751 }, /* EtherXpress */
2446
2447 /* Fujitsu Siemens Computer */
2448 { PCI_VENDOR_ID_FSC, 0x105d, PHY_ID_BCM5751 }, /* Futro C200 */
2449
2450 /* Compaq boards. */
2451 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
2452 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
2453 { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
2454 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
2455 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 } /* NC7780_2 */
2456 };
2457
2458 static int tg3_phy_probe(struct tg3 *tp)
2459 {
2460 uint32_t eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
2461 uint32_t hw_phy_id, hw_phy_id_masked;
2462 enum phy_led_mode eeprom_led_mode;
2463 uint32_t val;
2464 unsigned i;
2465 int eeprom_signature_found, err;
2466
2467 tp->phy_id = PHY_ID_INVALID;
2468
2469 for (i = 0; i < sizeof(subsys_id_to_phy_id)/sizeof(subsys_id_to_phy_id[0]); i++) {
2470 if ((subsys_id_to_phy_id[i].subsys_vendor == tp->subsystem_vendor) &&
2471 (subsys_id_to_phy_id[i].subsys_devid == tp->subsystem_device)) {
2472 tp->phy_id = subsys_id_to_phy_id[i].phy_id;
2473 break;
2474 }
2475 }
2476
2477 eeprom_phy_id = PHY_ID_INVALID;
2478 eeprom_led_mode = led_mode_auto;
2479 eeprom_signature_found = 0;
2480 tg3_read_mem(NIC_SRAM_DATA_SIG, &val);
2481 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
2482 uint32_t nic_cfg;
2483
2484 tg3_read_mem(NIC_SRAM_DATA_CFG, &nic_cfg);
2485 tp->nic_sram_data_cfg = nic_cfg;
2486
2487 eeprom_signature_found = 1;
2488
2489 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
2490 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
2491 eeprom_phy_id = PHY_ID_SERDES;
2492 } else {
2493 uint32_t nic_phy_id;
2494
2495 tg3_read_mem(NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
2496 if (nic_phy_id != 0) {
2497 uint32_t id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
2498 uint32_t id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
2499
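				/* Pack the two halves into the driver's
				 * internal PHY id format; the same packing is
				 * applied to the MII_PHYSID1/MII_PHYSID2 values
				 * read from the PHY further down, so both end
				 * up in the same format as tp->phy_id.
				 */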
2500 eeprom_phy_id = (id1 >> 16) << 10;
2501 eeprom_phy_id |= (id2 & 0xfc00) << 16;
2502 eeprom_phy_id |= (id2 & 0x03ff) << 0;
2503 }
2504 }
2505
2506 switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
2507 case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
2508 eeprom_led_mode = led_mode_three_link;
2509 break;
2510
2511 case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
2512 eeprom_led_mode = led_mode_link10;
2513 break;
2514
2515 default:
2516 eeprom_led_mode = led_mode_auto;
2517 break;
2518 };
2519 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2520 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
2521 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
2522 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)) {
2523 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
2524 }
2525
2526 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
2527 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
2528 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
2529 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
2530 }
2531
2532 /* Now read the physical PHY_ID from the chip and verify
2533 * that it is sane. If it doesn't look good, we fall back
2534 	 * to either the hard-coded table based PHY_ID or, failing
2535 	 * that, the value found in the eeprom area.
2536 */
2537 err = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
2538 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
2539
2540 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
2541 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
2542 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
2543
2544 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
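	/* PHY_ID_MASK presumably strips the low revision bits so that the
	 * KNOWN_PHY_ID() check below matches on PHY model regardless of
	 * silicon revision (assumption, inferred from how the masked value
	 * is used).
	 */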
2545
2546 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
2547 tp->phy_id = hw_phy_id;
2548 } else {
2549 /* phy_id currently holds the value found in the
2550 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
2551 * if a match was not found there.
2552 */
2553 if (tp->phy_id == PHY_ID_INVALID) {
2554 if (!eeprom_signature_found ||
2555 !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
2556 return -ENODEV;
2557 tp->phy_id = eeprom_phy_id;
2558 }
2559 }
2560
2561 err = tg3_phy_reset(tp);
2562 if (err)
2563 return err;
2564
2565 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2566 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2567 uint32_t mii_tg3_ctrl;
2568
2569 /* These chips, when reset, only advertise 10Mb
2570 * capabilities. Fix that.
2571 */
2572 err = tg3_writephy(tp, MII_ADVERTISE,
2573 (ADVERTISE_CSMA |
2574 ADVERTISE_PAUSE_CAP |
2575 ADVERTISE_10HALF |
2576 ADVERTISE_10FULL |
2577 ADVERTISE_100HALF |
2578 ADVERTISE_100FULL));
2579 mii_tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
2580 MII_TG3_CTRL_ADV_1000_FULL |
2581 MII_TG3_CTRL_AS_MASTER |
2582 MII_TG3_CTRL_ENABLE_AS_MASTER);
2583 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2584 mii_tg3_ctrl = 0;
2585
2586 err |= tg3_writephy(tp, MII_TG3_CTRL, mii_tg3_ctrl);
2587 err |= tg3_writephy(tp, MII_BMCR,
2588 (BMCR_ANRESTART | BMCR_ANENABLE));
2589 }
2590
2591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2592 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2593 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2594 tg3_writedsp(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
2595 }
2596
2597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2598 tg3_writephy(tp, 0x1c, 0x8d68);
2599 tg3_writephy(tp, 0x1c, 0x8d68);
2600 }
2601
2602 /* Enable Ethernet@WireSpeed */
2603 tg3_phy_set_wirespeed(tp);
2604
2605 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
2606 err = tg3_init_5401phy_dsp(tp);
2607 }
2608
2609 /* Determine the PHY led mode.
2610 	 * Be careful: if this gets set wrong it can result in an inability to
2611 * establish a link.
2612 */
2613 if (tp->phy_id == PHY_ID_SERDES) {
2614 tp->led_mode = led_mode_three_link;
2615 }
2616 else if (tp->subsystem_vendor == PCI_VENDOR_ID_DELL) {
2617 tp->led_mode = led_mode_link10;
2618 } else {
2619 tp->led_mode = led_mode_three_link;
2620 if (eeprom_signature_found &&
2621 eeprom_led_mode != led_mode_auto)
2622 tp->led_mode = eeprom_led_mode;
2623 }
2624
2625 if (tp->phy_id == PHY_ID_SERDES)
2626 tp->link_config.advertising =
2627 (ADVERTISED_1000baseT_Half |
2628 ADVERTISED_1000baseT_Full |
2629 ADVERTISED_Autoneg |
2630 ADVERTISED_FIBRE);
2631 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2632 tp->link_config.advertising &=
2633 ~(ADVERTISED_1000baseT_Half |
2634 ADVERTISED_1000baseT_Full);
2635
2636 return err;
2637 }
2638
2639 #if SUPPORT_PARTNO_STR
2640 static void tg3_read_partno(struct tg3 *tp)
2641 {
2642 unsigned char vpd_data[256];
2643 int i;
2644
2645 for (i = 0; i < 256; i += 4) {
2646 uint32_t tmp;
2647
2648 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
2649 goto out_not_found;
2650
2651 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
2652 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
2653 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
2654 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
2655 }
2656
2657 /* Now parse and find the part number. */
2658 for (i = 0; i < 256; ) {
2659 unsigned char val = vpd_data[i];
2660 int block_end;
2661
2662 if (val == 0x82 || val == 0x91) {
2663 i = (i + 3 +
2664 (vpd_data[i + 1] +
2665 (vpd_data[i + 2] << 8)));
2666 continue;
2667 }
2668
2669 if (val != 0x90)
2670 goto out_not_found;
2671
2672 block_end = (i + 3 +
2673 (vpd_data[i + 1] +
2674 (vpd_data[i + 2] << 8)));
2675 i += 3;
2676 while (i < block_end) {
2677 if (vpd_data[i + 0] == 'P' &&
2678 vpd_data[i + 1] == 'N') {
2679 int partno_len = vpd_data[i + 2];
2680
2681 if (partno_len > 24)
2682 goto out_not_found;
2683
2684 memcpy(tp->board_part_number,
2685 &vpd_data[i + 3],
2686 partno_len);
2687
2688 /* Success. */
2689 return;
2690 			}
			/* Advance to the next VPD keyword entry (2-byte
			 * keyword, 1-byte length, then data); without this
			 * the loop never terminates when 'PN' is not the
			 * first entry.
			 */
			i += 3 + vpd_data[i + 2];
2691 		}
2692
2693 /* Part number not found. */
2694 goto out_not_found;
2695 }
2696
2697 out_not_found:
2698 memcpy(tp->board_part_number, "none", sizeof("none"));
2699 }
2700 #else
2701 #define tg3_read_partno(TP) ((TP)->board_part_number[0] = '\0')
2702 #endif
2703
2704 static int tg3_get_invariants(struct tg3 *tp)
2705 {
2706 uint32_t misc_ctrl_reg;
2707 uint32_t pci_state_reg, grc_misc_cfg;
2708 uint16_t pci_cmd;
2709 uint8_t pci_latency;
2710 	uint32_t val;
2711 int err;
2712
2713 /* Read the subsystem vendor and device ids */
2714 pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
2715 pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);
2716
2717 	/* The sun_5704 code needs infrastructure etherboot does not have;
2718 * ignore it for now.
2719 */
2720
2721 /* If we have an AMD 762 or Intel ICH/ICH0 chipset, write
2722 * reordering to the mailbox registers done by the host
2723 * controller can cause major troubles. We read back from
2724 * every mailbox register write to force the writes to be
2725 * posted to the chip in order.
2726 *
2727 * TG3_FLAG_MBOX_WRITE_REORDER has been forced on.
2728 */
2729
2730 /* Force memory write invalidate off. If we leave it on,
2731 * then on 5700_BX chips we have to enable a workaround.
2732 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
2733 	 * to match the cacheline size.  The Broadcom driver has this
2734 	 * workaround but turns MWI off all the time, so it never uses
2735 * it. This seems to suggest that the workaround is insufficient.
2736 */
2737 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
2738 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
2739 /* Also, force SERR#/PERR# in PCI command. */
2740 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
2741 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
2742
2743 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
2744 * has the register indirect write enable bit set before
2745 * we try to access any of the MMIO registers. It is also
2746 * critical that the PCI-X hw workaround situation is decided
2747 * before that as well.
2748 */
2749 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, &misc_ctrl_reg);
2750
2751 tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT);
2752
2753 /* Initialize misc host control in PCI block. */
2754 tp->misc_host_ctrl |= (misc_ctrl_reg &
2755 MISC_HOST_CTRL_CHIPREV);
2756 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
2757 tp->misc_host_ctrl);
2758
2759 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, &pci_latency);
2760 if (pci_latency < 64) {
2761 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 64);
2762 }
2763
2764 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg);
2765
2766 /* If this is a 5700 BX chipset, and we are in PCI-X
2767 * mode, enable register write workaround.
2768 *
2769 * The workaround is to use indirect register accesses
2770 * for all chip writes not to mailbox registers.
2771 *
2772 	 * In etherboot, to simplify things, we just always use this workaround.
2773 */
2774 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
2775 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
2776 }
2777 /* Back to back register writes can cause problems on the 5701,
2778 * the workaround is to read back all reg writes except those to
2779 * mailbox regs.
2780 * In etherboot we always use indirect register accesses so
2781 * we don't see this.
2782 */
2783
2784 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
2785 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
2786 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
2787 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
2788
2789 /* Chip-specific fixup from Broadcom driver */
2790 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
2791 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
2792 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
2793 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
2794 }
2795
2796 	/* Determine if it is a PCIE system */
2797 	/* Alf : I have no idea what this is about...
2798 	 * but it's definitely useful */
2799 val = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
2800 if (val)
2801 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
2802
2803 /* Force the chip into D0. */
2804 tg3_set_power_state_0(tp);
2805
2806 /* Etherboot does not ask the tg3 to do checksums */
2807 /* Etherboot does not ask the tg3 to do jumbo frames */
2808 	/* Etherboot does not ask the tg3 to use WakeOnLan. */
2809
2810 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
2811 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
2812 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
2813 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2814 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
2815 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1))) {
2816 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
2817 }
2818
2819 /* Avoid tagged irq status etherboot does not use irqs */
2820
2821 /* Only 5701 and later support tagged irq status mode.
2822 * Also, 5788 chips cannot use tagged irq status.
2823 *
2824 	 * However, since etherboot does not use irqs, avoid tagged irq
2825 * status because the interrupt condition is more difficult to
2826 * fully clear in that mode.
2827 */
2828
2829 	/* Some 5700_AX && 5700_BX chips have problems with 32BYTE
2830 	 * coalesce_mode, and the rest work fine with anything set,
2831 	 * so don't enable HOST_CC_MODE_32BYTE in etherboot.
2832 */
2833
2834 /* Initialize MAC MI mode, polling disabled. */
2835 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
2836
2837 /* Initialize data/descriptor byte/word swapping. */
2838 tw32(GRC_MODE, tp->grc_mode);
2839
2840 tg3_switch_clocks(tp);
2841
2842 /* Clear this out for sanity. */
2843 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
2844
2845 	/* Etherboot does not need to check whether the PCIX_TARGET_HWBUG
2846 	 * workaround is needed; it always uses it.
2847 */
2848
2849 udelay(50);
2850 tg3_nvram_init(tp);
2851
2852 /* The TX descriptors will reside in main memory.
2853 */
2854
2855 /* See which board we are using.
2856 */
2857 grc_misc_cfg = tr32(GRC_MISC_CFG);
2858 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
2859
2860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2861 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
2862 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
2863 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
2864 }
2865
2866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2867 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
2868 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
2869 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
2870
2871 #define PCI_DEVICE_ID_TIGON3_5901 0x170d
2872 #define PCI_DEVICE_ID_TIGON3_5901_2 0x170e
2873
2874 /* these are limited to 10/100 only */
2875 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) &&
2876 ((grc_misc_cfg == 0x8000) || (grc_misc_cfg == 0x4000))) ||
2877 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2878 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM) &&
2879 ((tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901) ||
2880 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2)))) {
2881 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
2882 }
2883
2884 err = tg3_phy_probe(tp);
2885 if (err) {
2886 printf("phy probe failed, err %d\n", err);
2887 }
2888
2889 tg3_read_partno(tp);
2890
2891
2892 /* 5700 BX chips need to have their TX producer index mailboxes
2893 * written twice to workaround a bug.
2894 * In etherboot we do this unconditionally to simplify things.
2895 */
2896
2897 /* 5700 chips can get confused if TX buffers straddle the
2898 * 4GB address boundary in some cases.
2899 *
2900 * In etherboot we can ignore the problem as etherboot lives below 4GB.
2901 */
2902
2903 /* In etherboot wake-on-lan is unconditionally disabled */
2904 return err;
2905 }
2906
2907 static int tg3_get_device_address(struct tg3 *tp)
2908 {
2909 struct nic *nic = tp->nic;
2910 uint32_t hi, lo, mac_offset;
2911
2912 if (PCI_FUNC(tp->pdev->devfn) == 0)
2913 mac_offset = 0x7c;
2914 else
2915 mac_offset = 0xcc;
2916
2917 /* First try to get it from MAC address mailbox. */
2918 tg3_read_mem(NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
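	/* 0x484b is ASCII "HK" (0x48 = 'H', 0x4b = 'K'), which apparently
	 * marks a valid firmware-provided MAC address in this mailbox.
	 */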
2919 if ((hi >> 16) == 0x484b) {
2920 nic->node_addr[0] = (hi >> 8) & 0xff;
2921 nic->node_addr[1] = (hi >> 0) & 0xff;
2922
2923 tg3_read_mem(NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
2924 nic->node_addr[2] = (lo >> 24) & 0xff;
2925 nic->node_addr[3] = (lo >> 16) & 0xff;
2926 nic->node_addr[4] = (lo >> 8) & 0xff;
2927 nic->node_addr[5] = (lo >> 0) & 0xff;
2928 }
2929 /* Next, try NVRAM. */
2930 else if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
2931 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
2932 nic->node_addr[0] = ((hi >> 16) & 0xff);
2933 nic->node_addr[1] = ((hi >> 24) & 0xff);
2934 nic->node_addr[2] = ((lo >> 0) & 0xff);
2935 nic->node_addr[3] = ((lo >> 8) & 0xff);
2936 nic->node_addr[4] = ((lo >> 16) & 0xff);
2937 nic->node_addr[5] = ((lo >> 24) & 0xff);
2938 }
2939 /* Finally just fetch it out of the MAC control regs. */
2940 else {
2941 hi = tr32(MAC_ADDR_0_HIGH);
2942 lo = tr32(MAC_ADDR_0_LOW);
2943
2944 nic->node_addr[5] = lo & 0xff;
2945 nic->node_addr[4] = (lo >> 8) & 0xff;
2946 nic->node_addr[3] = (lo >> 16) & 0xff;
2947 nic->node_addr[2] = (lo >> 24) & 0xff;
2948 nic->node_addr[1] = hi & 0xff;
2949 nic->node_addr[0] = (hi >> 8) & 0xff;
2950 }
2951
2952 return 0;
2953 }
2954
2955
2956 static int tg3_setup_dma(struct tg3 *tp)
2957 {
2958 tw32(TG3PCI_CLOCK_CTRL, 0);
2959
2960 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
2961 tp->dma_rwctrl =
2962 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2963 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2964 (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2965 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
2966 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
2967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2968 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
2969 }
2970 } else {
2971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2972 tp->dma_rwctrl =
2973 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2974 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2975 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2976 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
2977 (0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
2978 else
2979 tp->dma_rwctrl =
2980 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2981 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2982 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2983 (0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
2984 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
2985
2986 /* Wheee, some more chip bugs... */
2987 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2988 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
2989 uint32_t ccval = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
2990
2991 if ((ccval == 0x6) || (ccval == 0x7)) {
2992 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
2993 }
2994 }
2995 }
2996
2997 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2998 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
2999 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
3000 }
3001
3002 /*
3003 Alf : Tried that, but it does not work. Should be this way though :-(
3004 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3005 tp->dma_rwctrl |= 0x001f0000;
3006 }
3007 */
3008 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
3009
3010 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
3011
3012 return 0;
3013 }
3014
3015 static void tg3_init_link_config(struct tg3 *tp)
3016 {
3017 tp->link_config.advertising =
3018 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3019 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3020 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
3021 ADVERTISED_Autoneg | ADVERTISED_MII);
3022 tp->carrier_ok = 0;
3023 tp->link_config.active_speed = SPEED_INVALID;
3024 tp->link_config.active_duplex = DUPLEX_INVALID;
3025 }
3026
3027
3028 #if SUPPORT_PHY_STR
3029 static const char * tg3_phy_string(struct tg3 *tp)
3030 {
3031 switch (tp->phy_id & PHY_ID_MASK) {
3032 case PHY_ID_BCM5400: return "5400";
3033 case PHY_ID_BCM5401: return "5401";
3034 case PHY_ID_BCM5411: return "5411";
3035 case PHY_ID_BCM5701: return "5701";
3036 case PHY_ID_BCM5703: return "5703";
3037 case PHY_ID_BCM5704: return "5704";
3038 case PHY_ID_BCM5705: return "5705";
3039 case PHY_ID_BCM5750: return "5750";
3040 case PHY_ID_BCM5751: return "5751";
3041 case PHY_ID_BCM5787: return "5787";
3042 case PHY_ID_BCM8002: return "8002/serdes";
3043 case PHY_ID_SERDES: return "serdes";
3044 default: return "unknown";
3045 };
3046 }
3047 #else
3048 #define tg3_phy_string(TP) "?"
3049 #endif
3050
3051
3052 static void tg3_poll_link(struct tg3 *tp)
3053 {
3054 uint32_t mac_stat;
3055
3056 mac_stat = tr32(MAC_STATUS);
3057 if (tp->phy_id == PHY_ID_SERDES) {
3058 if (tp->carrier_ok?
3059 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED):
3060 (mac_stat & MAC_STATUS_PCS_SYNCED)) {
3061 tw32_carefully(MAC_MODE, tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK);
3062 tw32_carefully(MAC_MODE, tp->mac_mode);
3063
3064 tg3_setup_phy(tp);
3065 }
3066 }
3067 else {
3068 if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) {
3069 tg3_setup_phy(tp);
3070 }
3071 }
3072 }
3073
3074 /**************************************************************************
3075 POLL - Wait for a frame
3076 ***************************************************************************/
3077 static void tg3_ack_irqs(struct tg3 *tp)
3078 {
3079 if (tp->hw_status->status & SD_STATUS_UPDATED) {
3080 /*
3081 * writing any value to intr-mbox-0 clears PCI INTA# and
3082 * chip-internal interrupt pending events.
3083 * writing non-zero to intr-mbox-0 additional tells the
3084 * NIC to stop sending us irqs, engaging "in-intr-handler"
3085 * event coalescing.
3086 */
3087 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3088 0x00000001);
3089 /*
3090 * Flush PCI write. This also guarantees that our
3091 * status block has been flushed to host memory.
3092 */
3093 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3094 tp->hw_status->status &= ~SD_STATUS_UPDATED;
3095 }
3096 }
3097
3098 static int tg3_poll(struct nic *nic, int retrieve)
3099 {
3100 /* return true if there's an ethernet packet ready to read */
3101 /* nic->packet should contain data on return */
3102 /* nic->packetlen should contain length of data */
3103
3104 struct tg3 *tp = &tg3;
3105 int result;
3106
3107 result = 0;
3108
3109 if ( (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) && !retrieve )
3110 return 1;
3111
3112 tg3_ack_irqs(tp);
3113
3114 if (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) {
3115 struct tg3_rx_buffer_desc *desc;
3116 unsigned int len;
3117 desc = &tp->rx_rcb[tp->rx_rcb_ptr];
3118 if ((desc->opaque & RXD_OPAQUE_RING_MASK) == RXD_OPAQUE_RING_STD) {
3119 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3120
3121 nic->packetlen = len;
3122 memcpy(nic->packet, bus_to_virt(desc->addr_lo), len);
3123 result = 1;
3124 }
3125 tp->rx_rcb_ptr = (tp->rx_rcb_ptr + 1) % TG3_RX_RCB_RING_SIZE;
3126
3127 /* ACK the status ring */
3128 tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, tp->rx_rcb_ptr);
3129
3130 /* Refill RX ring. */
3131 if (result) {
3132 tp->rx_std_ptr = (tp->rx_std_ptr + 1) % TG3_RX_RING_SIZE;
3133 tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, tp->rx_std_ptr);
3134 }
3135 }
3136 tg3_poll_link(tp);
3137 return result;
3138 }
3139
3140 /**************************************************************************
3141 TRANSMIT - Transmit a frame
3142 ***************************************************************************/
3143 #if 0
3144 static void tg3_set_txd(struct tg3 *tp, int entry,
3145 dma_addr_t mapping, int len, uint32_t flags,
3146 uint32_t mss_and_is_end)
3147 {
3148 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3149 int is_end = (mss_and_is_end & 0x1);
3150 if (is_end) {
3151 flags |= TXD_FLAG_END;
3152 }
3153
3154 txd->addr_hi = 0;
3155 txd->addr_lo = mapping & 0xffffffff;
3156 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3157 txd->vlan_tag = 0 << TXD_VLAN_TAG_SHIFT;
3158 }
3159 #endif
3160
3161 static void tg3_transmit(struct nic *nic, const char *dst_addr,
3162 unsigned int type, unsigned int size, const char *packet)
3163 {
3164 static int frame_idx;
3165 struct eth_frame *frame;
3166
3167 /* send the packet to destination */
3168 struct tg3_tx_buffer_desc *txd;
3169 struct tg3 *tp;
3170 uint32_t entry;
3171 int i;
3172
3173 /* Wait until there is a free packet frame */
3174 tp = &tg3;
3175 i = 0;
3176 entry = tp->tx_prod;
3177 while((tp->hw_status->idx[0].tx_consumer != entry) &&
3178 (tp->hw_status->idx[0].tx_consumer != PREV_TX(entry))) {
3179 		mdelay(10);	/* give the nic a chance */
3180 if (++i > 500) { /* timeout 5s for transmit */
3181 printf("transmit timed out\n");
3182 tg3_halt(tp);
3183 tg3_setup_hw(tp);
3184 return;
3185 }
3186 }
3187 if (i != 0) {
3188 printf("#");
3189 }
3190
3191 	/* Copy the packet to our local buffer */
3192 frame = &tg3_bss.tx_frame[frame_idx];
3193 memcpy(frame->dst_addr, dst_addr, ETH_ALEN);
3194 memcpy(frame->src_addr, nic->node_addr, ETH_ALEN);
3195 frame->type = htons(type);
3196 memset(frame->data, 0, sizeof(frame->data));
3197 memcpy(frame->data, packet, size);
3198
3199 /* Setup the ring buffer entry to transmit */
3200 txd = &tp->tx_ring[entry];
3201 txd->addr_hi = 0; /* Etherboot runs under 4GB */
3202 txd->addr_lo = virt_to_bus(frame);
3203 txd->len_flags = ((size + ETH_HLEN) << TXD_LEN_SHIFT) | TXD_FLAG_END;
3204 txd->vlan_tag = 0 << TXD_VLAN_TAG_SHIFT;
3205
3206 /* Advance to the next entry */
3207 entry = NEXT_TX(entry);
3208 frame_idx ^= 1;
3209
3210 /* Packets are ready, update Tx producer idx local and on card */
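	/* The producer index is written through both mailbox helpers; this
	 * appears to be the unconditional double write of the TX producer
	 * mailbox described in the 5700 BX note in tg3_get_invariants.
	 */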
3211 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3212 tw32_mailbox2((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3213 tp->tx_prod = entry;
3214 }
3215
3216 /**************************************************************************
3217 DISABLE - Turn off ethernet interface
3218 ***************************************************************************/
3219 static void tg3_disable ( struct nic *nic __unused ) {
3220 struct tg3 *tp = &tg3;
3221 /* put the card in its initial state */
3222 /* This function serves 3 purposes.
3223 * This disables DMA and interrupts so we don't receive
3224 * unexpected packets or interrupts from the card after
3225 * etherboot has finished.
3226 * This frees resources so etherboot may use
3227 * this driver on another interface
3228 * This allows etherboot to reinitialize the interface
3229 	 * if something goes wrong.
3230 */
3231 tg3_halt(tp);
3232 tp->tg3_flags &= ~(TG3_FLAG_INIT_COMPLETE|TG3_FLAG_GOT_SERDES_FLOWCTL);
3233 tp->carrier_ok = 0;
3234 iounmap((void *)tp->regs);
3235 }
3236
3237 /**************************************************************************
3238 IRQ - Enable, Disable, or Force interrupts
3239 ***************************************************************************/
3240 static void tg3_irq(struct nic *nic __unused, irq_action_t action __unused)
3241 {
3242 switch ( action ) {
3243 case DISABLE :
3244 break;
3245 case ENABLE :
3246 break;
3247 case FORCE :
3248 break;
3249 }
3250 }
3251
3252 static struct nic_operations tg3_operations = {
3253 .connect = dummy_connect,
3254 .poll = tg3_poll,
3255 .transmit = tg3_transmit,
3256 .irq = tg3_irq,
3257
3258 };
3259
3260 /**************************************************************************
3261 PROBE - Look for an adapter, this routine's visible to the outside
3262 You should omit the last argument struct pci_device * for a non-PCI NIC
3263 ***************************************************************************/
3264 static int tg3_probe ( struct nic *nic, struct pci_device *pdev ) {
3265
3266 struct tg3 *tp = &tg3;
3267 unsigned long tg3reg_base, tg3reg_len;
3268 int i, err, pm_cap;
3269
3270 memset(tp, 0, sizeof(*tp));
3271
3272 adjust_pci_device(pdev);
3273
3274 nic->irqno = 0;
3275 nic->ioaddr = pdev->ioaddr;
3276
3277 /* Find power-management capability. */
3278 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3279 if (pm_cap == 0) {
3280 printf("Cannot find PowerManagement capability, aborting.\n");
3281 return 0;
3282 }
3283 tg3reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
3284 if (tg3reg_base == -1UL) {
3285 		printf("Unusable BAR\n");
3286 return 0;
3287 }
3288 tg3reg_len = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
3289
3290 tp->pdev = pdev;
3291 tp->nic = nic;
3292 tp->pm_cap = pm_cap;
3293 tp->rx_mode = 0;
3294 tp->tx_mode = 0;
3295 tp->mi_mode = MAC_MI_MODE_BASE;
3296 tp->tg3_flags = 0 & ~TG3_FLAG_INIT_COMPLETE;
3297
3298 /* The word/byte swap controls here control register access byte
3299 * swapping. DMA data byte swapping is controlled in the GRC_MODE
3300 * setting below.
3301 */
3302 tp->misc_host_ctrl =
3303 MISC_HOST_CTRL_MASK_PCI_INT |
3304 MISC_HOST_CTRL_WORD_SWAP |
3305 MISC_HOST_CTRL_INDIR_ACCESS |
3306 MISC_HOST_CTRL_PCISTATE_RW;
3307
3308 /* The NONFRM (non-frame) byte/word swap controls take effect
3309 * on descriptor entries, anything which isn't packet data.
3310 *
3311 * The StrongARM chips on the board (one for tx, one for rx)
3312 * are running in big-endian mode.
3313 */
3314 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
3315 GRC_MODE_WSWAP_NONFRM_DATA);
3316 #if __BYTE_ORDER == __BIG_ENDIAN
3317 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
3318 #endif
3319 tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
3320 if (tp->regs == 0UL) {
3321 printf("Cannot map device registers, aborting\n");
3322 return 0;
3323 }
3324
3325 tg3_init_link_config(tp);
3326
3327 err = tg3_get_invariants(tp);
3328 if (err) {
3329 printf("Problem fetching invariants of chip, aborting.\n");
3330 goto err_out_iounmap;
3331 }
3332
3333 err = tg3_get_device_address(tp);
3334 if (err) {
3335 printf("Could not obtain valid ethernet address, aborting.\n");
3336 goto err_out_iounmap;
3337 }
3338
3339 DBG ( "Ethernet addr: %s\n", eth_ntoa ( nic->node_addr ) );
3340
3341 tg3_setup_dma(tp);
3342
3343 /* Now that we have fully setup the chip, save away a snapshot
3344 * of the PCI config space. We need to restore this after
3345 * GRC_MISC_CFG core clock resets and some resume events.
3346 */
3347 pci_save_state(tp->pdev, tp->pci_cfg_state);
3348
3349 printf("Tigon3 [partno(%s) rev %hx PHY(%s)] (PCI%s:%s:%s)\n",
3350 tp->board_part_number,
3351 tp->pci_chip_rev_id,
3352 tg3_phy_string(tp),
3353 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
3354 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
3355 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
3356 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
3357 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"));
3358
3359
3360 err = tg3_setup_hw(tp);
3361 if (err) {
3362 goto err_out_disable;
3363 }
3364 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
3365
3366 /* Wait for a reasonable time for the link to come up */
3367 tg3_poll_link(tp);
3368 for(i = 0; !tp->carrier_ok && (i < VALID_LINK_TIMEOUT*100); i++) {
3369 mdelay(1);
3370 tg3_poll_link(tp);
3371 }
3372 if (!tp->carrier_ok){
3373 printf("Valid link not established\n");
3374 goto err_out_disable;
3375 }
3376
3377 nic->nic_op = &tg3_operations;
3378 return 1;
3379
3380 err_out_iounmap:
3381 iounmap((void *)tp->regs);
3382 return 0;
3383 err_out_disable:
3384 tg3_disable(nic);
3385 return 0;
3386 }
3387
3388
3389 static struct pci_device_id tg3_nics[] = {
3390 PCI_ROM(0x14e4, 0x1644, "tg3-5700", "Broadcom Tigon 3 5700", 0),
3391 PCI_ROM(0x14e4, 0x1645, "tg3-5701", "Broadcom Tigon 3 5701", 0),
3392 PCI_ROM(0x14e4, 0x1646, "tg3-5702", "Broadcom Tigon 3 5702", 0),
3393 PCI_ROM(0x14e4, 0x1647, "tg3-5703", "Broadcom Tigon 3 5703", 0),
3394 PCI_ROM(0x14e4, 0x1648, "tg3-5704", "Broadcom Tigon 3 5704", 0),
3395 PCI_ROM(0x14e4, 0x164d, "tg3-5702FE", "Broadcom Tigon 3 5702FE", 0),
3396 PCI_ROM(0x14e4, 0x1653, "tg3-5705", "Broadcom Tigon 3 5705", 0),
3397 PCI_ROM(0x14e4, 0x1654, "tg3-5705_2", "Broadcom Tigon 3 5705_2", 0),
3398 PCI_ROM(0x14e4, 0x1659, "tg3-5721", "Broadcom Tigon 3 5721", 0),
3399 PCI_ROM(0x14e4, 0x165d, "tg3-5705M", "Broadcom Tigon 3 5705M", 0),
3400 PCI_ROM(0x14e4, 0x165e, "tg3-5705M_2", "Broadcom Tigon 3 5705M_2", 0),
3401 PCI_ROM(0x14e4, 0x1677, "tg3-5751", "Broadcom Tigon 3 5751", 0),
3402 PCI_ROM(0x14e4, 0x167a, "tg3-5754", "Broadcom Tigon 3 5754", 0),
3403 PCI_ROM(0x14e4, 0x1693, "tg3-5787", "Broadcom Tigon 3 5787", 0),
3404 PCI_ROM(0x14e4, 0x1696, "tg3-5782", "Broadcom Tigon 3 5782", 0),
3405 PCI_ROM(0x14e4, 0x169a, "tg3-5786", "Broadcom Tigon 3 5786", 0),
3406 PCI_ROM(0x14e4, 0x169c, "tg3-5788", "Broadcom Tigon 3 5788", 0),
3407 PCI_ROM(0x14e4, 0x169d, "tg3-5789", "Broadcom Tigon 3 5789", 0),
3408 PCI_ROM(0x14e4, 0x16a6, "tg3-5702X", "Broadcom Tigon 3 5702X", 0),
3409 PCI_ROM(0x14e4, 0x16a7, "tg3-5703X", "Broadcom Tigon 3 5703X", 0),
3410 PCI_ROM(0x14e4, 0x16a8, "tg3-5704S", "Broadcom Tigon 3 5704S", 0),
3411 PCI_ROM(0x14e4, 0x16c6, "tg3-5702A3", "Broadcom Tigon 3 5702A3", 0),
3412 PCI_ROM(0x14e4, 0x16c7, "tg3-5703A3", "Broadcom Tigon 3 5703A3", 0),
3413 PCI_ROM(0x14e4, 0x170d, "tg3-5901", "Broadcom Tigon 3 5901", 0),
3414 PCI_ROM(0x14e4, 0x170e, "tg3-5901_2", "Broadcom Tigon 3 5901_2", 0),
3415 PCI_ROM(0x1148, 0x4400, "tg3-9DXX", "Syskonnect 9DXX", 0),
3416 PCI_ROM(0x1148, 0x4500, "tg3-9MXX", "Syskonnect 9MXX", 0),
3417 PCI_ROM(0x173b, 0x03e8, "tg3-ac1000", "Altima AC1000", 0),
3418 PCI_ROM(0x173b, 0x03e9, "tg3-ac1001", "Altima AC1001", 0),
3419 PCI_ROM(0x173b, 0x03ea, "tg3-ac9100", "Altima AC9100", 0),
3420 PCI_ROM(0x173b, 0x03eb, "tg3-ac1003", "Altima AC1003", 0),
3421 PCI_ROM(0x0e11, 0x00ca, "tg3-hp", "HP Tigon 3", 0),
3422 };
3423
3424 PCI_DRIVER ( tg3_driver, tg3_nics, PCI_NO_CLASS );
3425
3426 DRIVER ( "TG3", nic_driver, pci_driver, tg3_driver,
3427 tg3_probe, tg3_disable );
3428
3429 /*
3430 * Local variables:
3431 * c-basic-offset: 8
3432 * c-indent-level: 8
3433 * tab-width: 8
3434 * End:
3435 */
3436