/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.97"
#define DRV_MODULE_RELDATE	"December 10, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)		((tp)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);


static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

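/* Indirect register access: the register offset is programmed into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * moved through TG3PCI_REG_DATA.  indirect_lock serializes users of
 * the shared window.
 */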
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

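/* Shorthand register/mailbox accessors.  These macros assume a
 * variable named 'tp' in scope.  The _f variants flush the write
 * (see _tw32_flush), and tw32_wait_f additionally enforces a
 * post-write delay in usecs.
 */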
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

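/* NIC SRAM is reached through the memory window at
 * TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA, either via PCI config
 * cycles or via MMIO depending on TG3_FLAG_SRAM_USE_CONFIG.  The base
 * address register is always returned to zero afterwards.
 */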
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

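/* Acquire an APE hardware lock: post APE_LOCK_REQ_DRIVER and poll the
 * grant register for up to 1 ms.  On timeout the request is revoked
 * and -EBUSY is returned.
 */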
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  that reenables interrupts.
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

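/* Reprogram TG3PCI_CLOCK_CTRL, preserving the CLKRUN bits and
 * stepping through the ALTCLK setting where the hardware requires an
 * intermediate state.  Each write is flushed and followed by a 40
 * usec wait (see _tw32_flush).
 */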
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

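/* MII access goes through the MAC's MI (MDIO) interface: a frame
 * containing the PHY address, register number and command is written
 * to MAC_MI_COM, then MI_COM_BUSY is polled for up to PHY_BUSY_LOOPS
 * iterations.  Autopolling is temporarily disabled around the access.
 */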
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~MAC_PHYCFG1_RGMII_INT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

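/* Push the current MII link state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 and PHYADDR) into the firmware mailbox in NIC
 * SRAM so the management firmware can track link changes.
 */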
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

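/* Convert the driver's FLOW_CTRL_TX/FLOW_CTRL_RX settings into the
 * 802.3 pause advertisement bits, for 1000BASE-T (below) and for
 * 1000BASE-X (tg3_advert_flowctrl_1000X).
 */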
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

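/* Resolve the negotiated pause configuration from the local and
 * link-partner 1000BASE-X advertisement words, per the standard
 * pause resolution rules.
 */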
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		return;

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

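/* Program the PHY DSP with per-chip tuning coefficients recovered
 * from the one-time-programmable (OTP) fuses in tp->phy_otp.  The
 * writes are bracketed by enabling and disabling the SM_DSP clock.
 */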
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

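/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify; on any mismatch or macro timeout, flag that
 * the PHY needs another reset via *resetp.
 */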
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

1775 /* This will reset the tigon3 PHY if there is no valid
1776  * link unless the FORCE argument is non-zero.
1777  */
1778 static int tg3_phy_reset(struct tg3 *tp)
1779 {
1780 	u32 cpmuctrl;
1781 	u32 phy_status;
1782 	int err;
1783 
1784 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1785 		u32 val;
1786 
1787 		val = tr32(GRC_MISC_CFG);
1788 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1789 		udelay(40);
1790 	}
1791 	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1792 	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1793 	if (err != 0)
1794 		return -EBUSY;
1795 
1796 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1797 		netif_carrier_off(tp->dev);
1798 		tg3_link_report(tp);
1799 	}
1800 
1801 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1802 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1803 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1804 		err = tg3_phy_reset_5703_4_5(tp);
1805 		if (err)
1806 			return err;
1807 		goto out;
1808 	}
1809 
1810 	cpmuctrl = 0;
1811 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1812 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1813 		cpmuctrl = tr32(TG3_CPMU_CTRL);
1814 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1815 			tw32(TG3_CPMU_CTRL,
1816 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1817 	}
1818 
1819 	err = tg3_bmcr_reset(tp);
1820 	if (err)
1821 		return err;
1822 
1823 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1824 		u32 phy;
1825 
1826 		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1827 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1828 
1829 		tw32(TG3_CPMU_CTRL, cpmuctrl);
1830 	}
1831 
1832 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1833 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1834 		u32 val;
1835 
1836 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1837 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1838 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
1839 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1840 			udelay(40);
1841 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1842 		}
1843 	}
1844 
1845 	tg3_phy_apply_otp(tp);
1846 
1847 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1848 		tg3_phy_toggle_apd(tp, true);
1849 	else
1850 		tg3_phy_toggle_apd(tp, false);
1851 
1852 out:
1853 	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1854 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1855 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1856 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1857 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1858 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1859 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860 	}
1861 	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1862 		tg3_writephy(tp, 0x1c, 0x8d68);
1863 		tg3_writephy(tp, 0x1c, 0x8d68);
1864 	}
1865 	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1866 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1867 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1868 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1869 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1870 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1871 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1872 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1873 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1874 	}
1875 	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1876 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1877 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1878 		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1879 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1880 			tg3_writephy(tp, MII_TG3_TEST1,
1881 				     MII_TG3_TEST1_TRIM_EN | 0x4);
1882 		} else
1883 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1884 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1885 	}
1886 	/* Set Extended packet length bit (bit 14) on all chips that
1887 	 * support jumbo frames */
1888 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1889 		/* Cannot do read-modify-write on 5401 */
1890 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1891 	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1892 		u32 phy_reg;
1893 
1894 		/* Set bit 14 with read-modify-write to preserve other bits */
1895 		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1896 		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1897 			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1898 	}
1899 
1900 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
1901 	 * jumbo frames transmission.
1902 	 */
1903 	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1904 		u32 phy_reg;
1905 
1906 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1907 		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
1908 				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1909 	}
1910 
1911 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1912 		/* adjust output voltage */
1913 		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1914 	}
1915 
1916 	tg3_phy_toggle_automdix(tp, 1);
1917 	tg3_phy_set_wirespeed(tp);
1918 	return 0;
1919 }
1920 
1921 static void tg3_frob_aux_power(struct tg3 *tp)
1922 {
1923 	struct tg3 *tp_peer = tp;
1924 
1925 	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1926 		return;
1927 
1928 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1929 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1930 		struct net_device *dev_peer;
1931 
1932 		dev_peer = pci_get_drvdata(tp->pdev_peer);
1933 		/* remove_one() may have been run on the peer. */
1934 		if (!dev_peer)
1935 			tp_peer = tp;
1936 		else
1937 			tp_peer = netdev_priv(dev_peer);
1938 	}
1939 
1940 	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1941 	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1942 	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1943 	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1944 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1945 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1946 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1947 				    (GRC_LCLCTRL_GPIO_OE0 |
1948 				     GRC_LCLCTRL_GPIO_OE1 |
1949 				     GRC_LCLCTRL_GPIO_OE2 |
1950 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
1951 				     GRC_LCLCTRL_GPIO_OUTPUT1),
1952 				    100);
1953 		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1954 			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1955 			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1956 					     GRC_LCLCTRL_GPIO_OE1 |
1957 					     GRC_LCLCTRL_GPIO_OE2 |
1958 					     GRC_LCLCTRL_GPIO_OUTPUT0 |
1959 					     GRC_LCLCTRL_GPIO_OUTPUT1 |
1960 					     tp->grc_local_ctrl;
1961 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1962 
1963 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1964 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1965 
1966 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1967 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1968 		} else {
1969 			u32 no_gpio2;
1970 			u32 grc_local_ctrl = 0;
1971 
1972 			if (tp_peer != tp &&
1973 			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1974 				return;
1975 
1976 			/* Workaround to prevent overdrawing Amps. */
1977 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1978 			    ASIC_REV_5714) {
1979 				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1980 				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1981 					    grc_local_ctrl, 100);
1982 			}
1983 
1984 			/* On 5753 and variants, GPIO2 cannot be used. */
1985 			no_gpio2 = tp->nic_sram_data_cfg &
1986 				    NIC_SRAM_DATA_CFG_NO_GPIO2;
1987 
1988 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1989 					 GRC_LCLCTRL_GPIO_OE1 |
1990 					 GRC_LCLCTRL_GPIO_OE2 |
1991 					 GRC_LCLCTRL_GPIO_OUTPUT1 |
1992 					 GRC_LCLCTRL_GPIO_OUTPUT2;
1993 			if (no_gpio2) {
1994 				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1995 						    GRC_LCLCTRL_GPIO_OUTPUT2);
1996 			}
1997 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1998 						    grc_local_ctrl, 100);
1999 
2000 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2001 
2002 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2003 						    grc_local_ctrl, 100);
2004 
2005 			if (!no_gpio2) {
2006 				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2007 				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2008 					    grc_local_ctrl, 100);
2009 			}
2010 		}
2011 	} else {
2012 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2013 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2014 			if (tp_peer != tp &&
2015 			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2016 				return;
2017 
2018 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2019 				    (GRC_LCLCTRL_GPIO_OE1 |
2020 				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2021 
2022 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2023 				    GRC_LCLCTRL_GPIO_OE1, 100);
2024 
2025 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2026 				    (GRC_LCLCTRL_GPIO_OE1 |
2027 				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2028 		}
2029 	}
2030 }
2031 
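/* Decide whether the 5700's MAC_MODE_LINK_POLARITY bit must be set for
 * the given link speed.  The logic below amounts to:
 *
 *	LED mode PHY_2:	always inverted
 *	BCM5411 PHY:	inverted except at 10 Mb/s
 *	otherwise:	inverted only at 10 Mb/s
 */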
2032 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2033 {
2034 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2035 		return 1;
2036 	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2037 		if (speed != SPEED_10)
2038 			return 1;
2039 	} else if (speed == SPEED_10)
2040 		return 1;
2041 
2042 	return 0;
2043 }
2044 
2045 static int tg3_setup_phy(struct tg3 *, int);
2046 
2047 #define RESET_KIND_SHUTDOWN	0
2048 #define RESET_KIND_INIT		1
2049 #define RESET_KIND_SUSPEND	2
2050 
2051 static void tg3_write_sig_post_reset(struct tg3 *, int);
2052 static int tg3_halt_cpu(struct tg3 *, u32);
2053 static int tg3_nvram_lock(struct tg3 *);
2054 static void tg3_nvram_unlock(struct tg3 *);
2055 
2056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2057 {
2058 	u32 val;
2059 
2060 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2061 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2062 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2063 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2064 
2065 			sg_dig_ctrl |=
2066 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2067 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2068 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2069 		}
2070 		return;
2071 	}
2072 
2073 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2074 		tg3_bmcr_reset(tp);
2075 		val = tr32(GRC_MISC_CFG);
2076 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2077 		udelay(40);
2078 		return;
2079 	} else if (do_low_power) {
2080 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2081 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2082 
2083 		tg3_writephy(tp, MII_TG3_AUX_CTRL,
2084 			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2085 			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2086 			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2087 			     MII_TG3_AUXCTL_PCTL_VREG_11V);
2088 	}
2089 
2090 	/* The PHY should not be powered down on some chips because
2091 	 * of bugs.
2092 	 */
2093 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2094 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2095 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2096 	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2097 		return;
2098 
2099 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2100 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2101 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2102 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2103 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2104 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2105 	}
2106 
2107 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2108 }
2109 
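/* The station address is programmed as two words: the first two octets
 * go into the HIGH register, the remaining four into LOW.  For an
 * illustrative address of 00:10:18:aa:bb:cc this yields
 *
 *	addr_high = 0x00000010;
 *	addr_low  = 0x18aabbcc;
 *
 * The transmit backoff seed is simply the sum of all six octets masked
 * with TX_BACKOFF_SEED_MASK.
 */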
2110 /* tp->lock is held. */
2111 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2112 {
2113 	u32 addr_high, addr_low;
2114 	int i;
2115 
2116 	addr_high = ((tp->dev->dev_addr[0] << 8) |
2117 		     tp->dev->dev_addr[1]);
2118 	addr_low = ((tp->dev->dev_addr[2] << 24) |
2119 		    (tp->dev->dev_addr[3] << 16) |
2120 		    (tp->dev->dev_addr[4] <<  8) |
2121 		    (tp->dev->dev_addr[5] <<  0));
2122 	for (i = 0; i < 4; i++) {
2123 		if (i == 1 && skip_mac_1)
2124 			continue;
2125 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2126 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2127 	}
2128 
2129 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2130 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2131 		for (i = 0; i < 12; i++) {
2132 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2133 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2134 		}
2135 	}
2136 
2137 	addr_high = (tp->dev->dev_addr[0] +
2138 		     tp->dev->dev_addr[1] +
2139 		     tp->dev->dev_addr[2] +
2140 		     tp->dev->dev_addr[3] +
2141 		     tp->dev->dev_addr[4] +
2142 		     tp->dev->dev_addr[5]) &
2143 		TX_BACKOFF_SEED_MASK;
2144 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
2145 }
2146 
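/* Prepare the chip for the requested PCI power state.  D0 just wakes
 * the device; for D1-D3hot the code restricts the PHY advertisement to
 * the configured WoL speed, arms the magic-packet logic through
 * MAC_MODE, gates core clocks where the chip allows it, and finally
 * calls pci_set_power_state().
 */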
2147 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2148 {
2149 	u32 misc_host_ctrl;
2150 	bool device_should_wake, do_low_power;
2151 
2152 	/* Make sure register accesses (indirect or otherwise)
2153 	 * will function correctly.
2154 	 */
2155 	pci_write_config_dword(tp->pdev,
2156 			       TG3PCI_MISC_HOST_CTRL,
2157 			       tp->misc_host_ctrl);
2158 
2159 	switch (state) {
2160 	case PCI_D0:
2161 		pci_enable_wake(tp->pdev, state, false);
2162 		pci_set_power_state(tp->pdev, PCI_D0);
2163 
2164 		/* Switch out of Vaux if it is a NIC */
2165 		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2166 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2167 
2168 		return 0;
2169 
2170 	case PCI_D1:
2171 	case PCI_D2:
2172 	case PCI_D3hot:
2173 		break;
2174 
2175 	default:
2176 		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2177 			tp->dev->name, state);
2178 		return -EINVAL;
2179 	}
2180 
2181 	/* Restore the CLKREQ setting. */
2182 	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2183 		u16 lnkctl;
2184 
2185 		pci_read_config_word(tp->pdev,
2186 				     tp->pcie_cap + PCI_EXP_LNKCTL,
2187 				     &lnkctl);
2188 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2189 		pci_write_config_word(tp->pdev,
2190 				      tp->pcie_cap + PCI_EXP_LNKCTL,
2191 				      lnkctl);
2192 	}
2193 
2194 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2195 	tw32(TG3PCI_MISC_HOST_CTRL,
2196 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2197 
2198 	device_should_wake = pci_pme_capable(tp->pdev, state) &&
2199 			     device_may_wakeup(&tp->pdev->dev) &&
2200 			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2201 
2202 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2203 		do_low_power = false;
2204 		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2205 		    !tp->link_config.phy_is_low_power) {
2206 			struct phy_device *phydev;
2207 			u32 phyid, advertising;
2208 
2209 			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2210 
2211 			tp->link_config.phy_is_low_power = 1;
2212 
2213 			tp->link_config.orig_speed = phydev->speed;
2214 			tp->link_config.orig_duplex = phydev->duplex;
2215 			tp->link_config.orig_autoneg = phydev->autoneg;
2216 			tp->link_config.orig_advertising = phydev->advertising;
2217 
2218 			advertising = ADVERTISED_TP |
2219 				      ADVERTISED_Pause |
2220 				      ADVERTISED_Autoneg |
2221 				      ADVERTISED_10baseT_Half;
2222 
2223 			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2224 			    device_should_wake) {
2225 				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2226 					advertising |=
2227 						ADVERTISED_100baseT_Half |
2228 						ADVERTISED_100baseT_Full |
2229 						ADVERTISED_10baseT_Full;
2230 				else
2231 					advertising |= ADVERTISED_10baseT_Full;
2232 			}
2233 
2234 			phydev->advertising = advertising;
2235 
2236 			phy_start_aneg(phydev);
2237 
2238 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2239 			if (phyid != TG3_PHY_ID_BCMAC131) {
2240 				phyid &= TG3_PHY_OUI_MASK;
2241 				if (phyid == TG3_PHY_OUI_1 ||
2242 				    phyid == TG3_PHY_OUI_2 ||
2243 				    phyid == TG3_PHY_OUI_3)
2244 					do_low_power = true;
2245 			}
2246 		}
2247 	} else {
2248 		do_low_power = true;
2249 
2250 		if (tp->link_config.phy_is_low_power == 0) {
2251 			tp->link_config.phy_is_low_power = 1;
2252 			tp->link_config.orig_speed = tp->link_config.speed;
2253 			tp->link_config.orig_duplex = tp->link_config.duplex;
2254 			tp->link_config.orig_autoneg = tp->link_config.autoneg;
2255 		}
2256 
2257 		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2258 			tp->link_config.speed = SPEED_10;
2259 			tp->link_config.duplex = DUPLEX_HALF;
2260 			tp->link_config.autoneg = AUTONEG_ENABLE;
2261 			tg3_setup_phy(tp, 0);
2262 		}
2263 	}
2264 
2265 	__tg3_set_mac_addr(tp, 0);
2266 
2267 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2268 		u32 val;
2269 
2270 		val = tr32(GRC_VCPU_EXT_CTRL);
2271 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2272 	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2273 		int i;
2274 		u32 val;
2275 
2276 		for (i = 0; i < 200; i++) {
2277 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2278 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2279 				break;
2280 			msleep(1);
2281 		}
2282 	}
2283 	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2284 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2285 						     WOL_DRV_STATE_SHUTDOWN |
2286 						     WOL_DRV_WOL |
2287 						     WOL_SET_MAGIC_PKT);
2288 
2289 	if (device_should_wake) {
2290 		u32 mac_mode;
2291 
2292 		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2293 			if (do_low_power) {
2294 				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2295 				udelay(40);
2296 			}
2297 
2298 			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2299 				mac_mode = MAC_MODE_PORT_MODE_GMII;
2300 			else
2301 				mac_mode = MAC_MODE_PORT_MODE_MII;
2302 
2303 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2304 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2305 			    ASIC_REV_5700) {
2306 				u32 speed = (tp->tg3_flags &
2307 					     TG3_FLAG_WOL_SPEED_100MB) ?
2308 					     SPEED_100 : SPEED_10;
2309 				if (tg3_5700_link_polarity(tp, speed))
2310 					mac_mode |= MAC_MODE_LINK_POLARITY;
2311 				else
2312 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
2313 			}
2314 		} else {
2315 			mac_mode = MAC_MODE_PORT_MODE_TBI;
2316 		}
2317 
2318 		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2319 			tw32(MAC_LED_CTRL, tp->led_ctrl);
2320 
2321 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2322 		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2323 		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2324 		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2325 		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2326 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2327 
2328 		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2329 			mac_mode |= tp->mac_mode &
2330 				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2331 			if (mac_mode & MAC_MODE_APE_TX_EN)
2332 				mac_mode |= MAC_MODE_TDE_ENABLE;
2333 		}
2334 
2335 		tw32_f(MAC_MODE, mac_mode);
2336 		udelay(100);
2337 
2338 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2339 		udelay(10);
2340 	}
2341 
2342 	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2343 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2344 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2345 		u32 base_val;
2346 
2347 		base_val = tp->pci_clock_ctrl;
2348 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2349 			     CLOCK_CTRL_TXCLK_DISABLE);
2350 
2351 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2352 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
2353 	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2354 		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2355 		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2356 		/* do nothing */
2357 	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2358 		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2359 		u32 newbits1, newbits2;
2360 
2361 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2362 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2363 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2364 				    CLOCK_CTRL_TXCLK_DISABLE |
2365 				    CLOCK_CTRL_ALTCLK);
2366 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2367 		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2368 			newbits1 = CLOCK_CTRL_625_CORE;
2369 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2370 		} else {
2371 			newbits1 = CLOCK_CTRL_ALTCLK;
2372 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2373 		}
2374 
2375 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2376 			    40);
2377 
2378 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2379 			    40);
2380 
2381 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2382 			u32 newbits3;
2383 
2384 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2385 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2386 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2387 					    CLOCK_CTRL_TXCLK_DISABLE |
2388 					    CLOCK_CTRL_44MHZ_CORE);
2389 			} else {
2390 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
2391 			}
2392 
2393 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
2394 				    tp->pci_clock_ctrl | newbits3, 40);
2395 		}
2396 	}
2397 
2398 	if (!(device_should_wake) &&
2399 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2400 		tg3_power_down_phy(tp, do_low_power);
2401 
2402 	tg3_frob_aux_power(tp);
2403 
2404 	/* Workaround for unstable PLL clock */
2405 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2406 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2407 		u32 val = tr32(0x7d00);
2408 
2409 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2410 		tw32(0x7d00, val);
2411 		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2412 			int err;
2413 
2414 			err = tg3_nvram_lock(tp);
2415 			tg3_halt_cpu(tp, RX_CPU_BASE);
2416 			if (!err)
2417 				tg3_nvram_unlock(tp);
2418 		}
2419 	}
2420 
2421 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2422 
2423 	if (device_should_wake)
2424 		pci_enable_wake(tp->pdev, state, true);
2425 
2426 	/* Finally, set the new power state. */
2427 	pci_set_power_state(tp->pdev, state);
2428 
2429 	return 0;
2430 }
2431 
2432 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2433 {
2434 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2435 	case MII_TG3_AUX_STAT_10HALF:
2436 		*speed = SPEED_10;
2437 		*duplex = DUPLEX_HALF;
2438 		break;
2439 
2440 	case MII_TG3_AUX_STAT_10FULL:
2441 		*speed = SPEED_10;
2442 		*duplex = DUPLEX_FULL;
2443 		break;
2444 
2445 	case MII_TG3_AUX_STAT_100HALF:
2446 		*speed = SPEED_100;
2447 		*duplex = DUPLEX_HALF;
2448 		break;
2449 
2450 	case MII_TG3_AUX_STAT_100FULL:
2451 		*speed = SPEED_100;
2452 		*duplex = DUPLEX_FULL;
2453 		break;
2454 
2455 	case MII_TG3_AUX_STAT_1000HALF:
2456 		*speed = SPEED_1000;
2457 		*duplex = DUPLEX_HALF;
2458 		break;
2459 
2460 	case MII_TG3_AUX_STAT_1000FULL:
2461 		*speed = SPEED_1000;
2462 		*duplex = DUPLEX_FULL;
2463 		break;
2464 
2465 	default:
2466 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2468 				 SPEED_10;
2469 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2470 				  DUPLEX_HALF;
2471 			break;
2472 		}
2473 		*speed = SPEED_INVALID;
2474 		*duplex = DUPLEX_INVALID;
2475 		break;
2476 	}
2477 }
2478 
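/* Translate the generic ethtool advertisement mask into the MII
 * registers: the 10/100 bits live in MII_ADVERTISE, the gigabit bits in
 * the vendor-specific MII_TG3_CTRL register.  A sketch of the mapping
 * applied below:
 *
 *	ADVERTISED_10baseT_Half   -> ADVERTISE_10HALF
 *	ADVERTISED_100baseT_Full  -> ADVERTISE_100FULL
 *	ADVERTISED_1000baseT_Full -> MII_TG3_CTRL_ADV_1000_FULL
 */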
2479 static void tg3_phy_copper_begin(struct tg3 *tp)
2480 {
2481 	u32 new_adv;
2482 	int i;
2483 
2484 	if (tp->link_config.phy_is_low_power) {
2485 		/* Entering low power mode.  Disable gigabit and
2486 		 * 100baseT advertisements.
2487 		 */
2488 		tg3_writephy(tp, MII_TG3_CTRL, 0);
2489 
2490 		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2491 			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2492 		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2493 			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2494 
2495 		tg3_writephy(tp, MII_ADVERTISE, new_adv);
2496 	} else if (tp->link_config.speed == SPEED_INVALID) {
2497 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2498 			tp->link_config.advertising &=
2499 				~(ADVERTISED_1000baseT_Half |
2500 				  ADVERTISED_1000baseT_Full);
2501 
2502 		new_adv = ADVERTISE_CSMA;
2503 		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2504 			new_adv |= ADVERTISE_10HALF;
2505 		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2506 			new_adv |= ADVERTISE_10FULL;
2507 		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2508 			new_adv |= ADVERTISE_100HALF;
2509 		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2510 			new_adv |= ADVERTISE_100FULL;
2511 
2512 		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2513 
2514 		tg3_writephy(tp, MII_ADVERTISE, new_adv);
2515 
2516 		if (tp->link_config.advertising &
2517 		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2518 			new_adv = 0;
2519 			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2520 				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2521 			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2522 				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2523 			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2524 			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2525 			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2526 				new_adv |= (MII_TG3_CTRL_AS_MASTER |
2527 					    MII_TG3_CTRL_ENABLE_AS_MASTER);
2528 			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2529 		} else {
2530 			tg3_writephy(tp, MII_TG3_CTRL, 0);
2531 		}
2532 	} else {
2533 		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2534 		new_adv |= ADVERTISE_CSMA;
2535 
2536 		/* Asking for a specific link mode. */
2537 		if (tp->link_config.speed == SPEED_1000) {
2538 			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2539 
2540 			if (tp->link_config.duplex == DUPLEX_FULL)
2541 				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2542 			else
2543 				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2544 			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2545 			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2546 				new_adv |= (MII_TG3_CTRL_AS_MASTER |
2547 					    MII_TG3_CTRL_ENABLE_AS_MASTER);
2548 		} else {
2549 			if (tp->link_config.speed == SPEED_100) {
2550 				if (tp->link_config.duplex == DUPLEX_FULL)
2551 					new_adv |= ADVERTISE_100FULL;
2552 				else
2553 					new_adv |= ADVERTISE_100HALF;
2554 			} else {
2555 				if (tp->link_config.duplex == DUPLEX_FULL)
2556 					new_adv |= ADVERTISE_10FULL;
2557 				else
2558 					new_adv |= ADVERTISE_10HALF;
2559 			}
2560 			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2561 
2562 			new_adv = 0;
2563 		}
2564 
2565 		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2566 	}
2567 
2568 	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2569 	    tp->link_config.speed != SPEED_INVALID) {
2570 		u32 bmcr, orig_bmcr;
2571 
2572 		tp->link_config.active_speed = tp->link_config.speed;
2573 		tp->link_config.active_duplex = tp->link_config.duplex;
2574 
2575 		bmcr = 0;
2576 		switch (tp->link_config.speed) {
2577 		default:
2578 		case SPEED_10:
2579 			break;
2580 
2581 		case SPEED_100:
2582 			bmcr |= BMCR_SPEED100;
2583 			break;
2584 
2585 		case SPEED_1000:
2586 			bmcr |= TG3_BMCR_SPEED1000;
2587 			break;
2588 		}
2589 
2590 		if (tp->link_config.duplex == DUPLEX_FULL)
2591 			bmcr |= BMCR_FULLDPLX;
2592 
2593 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2594 		    (bmcr != orig_bmcr)) {
2595 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2596 			for (i = 0; i < 1500; i++) {
2597 				u32 tmp;
2598 
2599 				udelay(10);
2600 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2601 				    tg3_readphy(tp, MII_BMSR, &tmp))
2602 					continue;
2603 				if (!(tmp & BMSR_LSTATUS)) {
2604 					udelay(40);
2605 					break;
2606 				}
2607 			}
2608 			tg3_writephy(tp, MII_BMCR, bmcr);
2609 			udelay(40);
2610 		}
2611 	} else {
2612 		tg3_writephy(tp, MII_BMCR,
2613 			     BMCR_ANENABLE | BMCR_ANRESTART);
2614 	}
2615 }
2616 
2617 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2618 {
2619 	int err;
2620 
2621 	/* Turn off tap power management. */
2622 	/* Set Extended packet length bit */
2623 	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2624 
2625 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2626 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2627 
2628 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2629 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2630 
2631 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2632 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2633 
2634 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2635 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2636 
2637 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2638 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2639 
2640 	udelay(40);
2641 
2642 	return err;
2643 }
2644 
2645 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2646 {
2647 	u32 adv_reg, all_mask = 0;
2648 
2649 	if (mask & ADVERTISED_10baseT_Half)
2650 		all_mask |= ADVERTISE_10HALF;
2651 	if (mask & ADVERTISED_10baseT_Full)
2652 		all_mask |= ADVERTISE_10FULL;
2653 	if (mask & ADVERTISED_100baseT_Half)
2654 		all_mask |= ADVERTISE_100HALF;
2655 	if (mask & ADVERTISED_100baseT_Full)
2656 		all_mask |= ADVERTISE_100FULL;
2657 
2658 	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2659 		return 0;
2660 
2661 	if ((adv_reg & all_mask) != all_mask)
2662 		return 0;
2663 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2664 		u32 tg3_ctrl;
2665 
2666 		all_mask = 0;
2667 		if (mask & ADVERTISED_1000baseT_Half)
2668 			all_mask |= ADVERTISE_1000HALF;
2669 		if (mask & ADVERTISED_1000baseT_Full)
2670 			all_mask |= ADVERTISE_1000FULL;
2671 
2672 		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2673 			return 0;
2674 
2675 		if ((tg3_ctrl & all_mask) != all_mask)
2676 			return 0;
2677 	}
2678 	return 1;
2679 }
2680 
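/* Pause autonegotiation uses two advertisement bits (IEEE 802.3 annex
 * 28B): ADVERTISE_PAUSE_CAP for symmetric and ADVERTISE_PAUSE_ASYM for
 * asymmetric pause.  tg3_advert_flowctrl_1000T() maps the requested
 * tp->link_config.flowctrl flags onto those bits; the check below
 * compares them against what the PHY is actually advertising.
 */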
2681 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2682 {
2683 	u32 curadv, reqadv;
2684 
2685 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2686 		return 1;
2687 
2688 	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2689 	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2690 
2691 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
2692 		if (curadv != reqadv)
2693 			return 0;
2694 
2695 		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2696 			tg3_readphy(tp, MII_LPA, rmtadv);
2697 	} else {
2698 		/* Reprogram the advertisement register, even if it
2699 		 * does not affect the current link.  If the link
2700 		 * gets renegotiated in the future, we can save an
2701 		 * additional renegotiation cycle by advertising
2702 		 * it correctly in the first place.
2703 		 */
2704 		if (curadv != reqadv) {
2705 			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
2706 				     ADVERTISE_PAUSE_ASYM);
2707 			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2708 		}
2709 	}
2710 
2711 	return 1;
2712 }
2713 
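/* Note that MII_BMSR latches link failures: the first read returns the
 * latched (possibly stale) status and clears it, and only the second
 * read reflects the current state.  That is why BMSR is polled back to
 * back throughout this function, e.g.:
 *
 *	tg3_readphy(tp, MII_BMSR, &bmsr);
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... the link really is up ...
 */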
2714 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2715 {
2716 	int current_link_up;
2717 	u32 bmsr, dummy;
2718 	u32 lcl_adv, rmt_adv;
2719 	u16 current_speed;
2720 	u8 current_duplex;
2721 	int i, err;
2722 
2723 	tw32(MAC_EVENT, 0);
2724 
2725 	tw32_f(MAC_STATUS,
2726 	     (MAC_STATUS_SYNC_CHANGED |
2727 	      MAC_STATUS_CFG_CHANGED |
2728 	      MAC_STATUS_MI_COMPLETION |
2729 	      MAC_STATUS_LNKSTATE_CHANGED));
2730 	udelay(40);
2731 
2732 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2733 		tw32_f(MAC_MI_MODE,
2734 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2735 		udelay(80);
2736 	}
2737 
2738 	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2739 
2740 	/* Some third-party PHYs need to be reset on link going
2741 	 * down.
2742 	 */
2743 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2744 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2745 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2746 	    netif_carrier_ok(tp->dev)) {
2747 		tg3_readphy(tp, MII_BMSR, &bmsr);
2748 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2749 		    !(bmsr & BMSR_LSTATUS))
2750 			force_reset = 1;
2751 	}
2752 	if (force_reset)
2753 		tg3_phy_reset(tp);
2754 
2755 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2756 		tg3_readphy(tp, MII_BMSR, &bmsr);
2757 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2758 		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2759 			bmsr = 0;
2760 
2761 		if (!(bmsr & BMSR_LSTATUS)) {
2762 			err = tg3_init_5401phy_dsp(tp);
2763 			if (err)
2764 				return err;
2765 
2766 			tg3_readphy(tp, MII_BMSR, &bmsr);
2767 			for (i = 0; i < 1000; i++) {
2768 				udelay(10);
2769 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2770 				    (bmsr & BMSR_LSTATUS)) {
2771 					udelay(40);
2772 					break;
2773 				}
2774 			}
2775 
2776 			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2777 			    !(bmsr & BMSR_LSTATUS) &&
2778 			    tp->link_config.active_speed == SPEED_1000) {
2779 				err = tg3_phy_reset(tp);
2780 				if (!err)
2781 					err = tg3_init_5401phy_dsp(tp);
2782 				if (err)
2783 					return err;
2784 			}
2785 		}
2786 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2787 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2788 		/* 5701 {A0,B0} CRC bug workaround */
2789 		tg3_writephy(tp, 0x15, 0x0a75);
2790 		tg3_writephy(tp, 0x1c, 0x8c68);
2791 		tg3_writephy(tp, 0x1c, 0x8d68);
2792 		tg3_writephy(tp, 0x1c, 0x8c68);
2793 	}
2794 
2795 	/* Clear pending interrupts... */
2796 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2797 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2798 
2799 	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2800 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2801 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2802 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
2803 
2804 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2805 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2806 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2807 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2808 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2809 		else
2810 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2811 	}
2812 
2813 	current_link_up = 0;
2814 	current_speed = SPEED_INVALID;
2815 	current_duplex = DUPLEX_INVALID;
2816 
2817 	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2818 		u32 val;
2819 
2820 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2821 		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2822 		if (!(val & (1 << 10))) {
2823 			val |= (1 << 10);
2824 			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2825 			goto relink;
2826 		}
2827 	}
2828 
2829 	bmsr = 0;
2830 	for (i = 0; i < 100; i++) {
2831 		tg3_readphy(tp, MII_BMSR, &bmsr);
2832 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2833 		    (bmsr & BMSR_LSTATUS))
2834 			break;
2835 		udelay(40);
2836 	}
2837 
2838 	if (bmsr & BMSR_LSTATUS) {
2839 		u32 aux_stat, bmcr;
2840 
2841 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2842 		for (i = 0; i < 2000; i++) {
2843 			udelay(10);
2844 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2845 			    aux_stat)
2846 				break;
2847 		}
2848 
2849 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2850 					     &current_speed,
2851 					     &current_duplex);
2852 
2853 		bmcr = 0;
2854 		for (i = 0; i < 200; i++) {
2855 			tg3_readphy(tp, MII_BMCR, &bmcr);
2856 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
2857 				continue;
2858 			if (bmcr && bmcr != 0x7fff)
2859 				break;
2860 			udelay(10);
2861 		}
2862 
2863 		lcl_adv = 0;
2864 		rmt_adv = 0;
2865 
2866 		tp->link_config.active_speed = current_speed;
2867 		tp->link_config.active_duplex = current_duplex;
2868 
2869 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2870 			if ((bmcr & BMCR_ANENABLE) &&
2871 			    tg3_copper_is_advertising_all(tp,
2872 						tp->link_config.advertising)) {
2873 				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2874 								  &rmt_adv))
2875 					current_link_up = 1;
2876 			}
2877 		} else {
2878 			if (!(bmcr & BMCR_ANENABLE) &&
2879 			    tp->link_config.speed == current_speed &&
2880 			    tp->link_config.duplex == current_duplex &&
2881 			    tp->link_config.flowctrl ==
2882 			    tp->link_config.active_flowctrl) {
2883 				current_link_up = 1;
2884 			}
2885 		}
2886 
2887 		if (current_link_up == 1 &&
2888 		    tp->link_config.active_duplex == DUPLEX_FULL)
2889 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2890 	}
2891 
2892 relink:
2893 	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2894 		u32 tmp;
2895 
2896 		tg3_phy_copper_begin(tp);
2897 
2898 		tg3_readphy(tp, MII_BMSR, &tmp);
2899 		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2900 		    (tmp & BMSR_LSTATUS))
2901 			current_link_up = 1;
2902 	}
2903 
2904 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2905 	if (current_link_up == 1) {
2906 		if (tp->link_config.active_speed == SPEED_100 ||
2907 		    tp->link_config.active_speed == SPEED_10)
2908 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2909 		else
2910 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2911 	} else
2912 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2913 
2914 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2915 	if (tp->link_config.active_duplex == DUPLEX_HALF)
2916 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2917 
2918 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2919 		if (current_link_up == 1 &&
2920 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2921 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2922 		else
2923 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2924 	}
2925 
2926 	/* ??? Without this setting Netgear GA302T PHY does not
2927 	 * ??? send/receive packets...
2928 	 */
2929 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2930 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2931 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2932 		tw32_f(MAC_MI_MODE, tp->mi_mode);
2933 		udelay(80);
2934 	}
2935 
2936 	tw32_f(MAC_MODE, tp->mac_mode);
2937 	udelay(40);
2938 
2939 	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2940 		/* Polled via timer. */
2941 		tw32_f(MAC_EVENT, 0);
2942 	} else {
2943 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2944 	}
2945 	udelay(40);
2946 
2947 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2948 	    current_link_up == 1 &&
2949 	    tp->link_config.active_speed == SPEED_1000 &&
2950 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2951 	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2952 		udelay(120);
2953 		tw32_f(MAC_STATUS,
2954 		     (MAC_STATUS_SYNC_CHANGED |
2955 		      MAC_STATUS_CFG_CHANGED));
2956 		udelay(40);
2957 		tg3_write_mem(tp,
2958 			      NIC_SRAM_FIRMWARE_MBOX,
2959 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2960 	}
2961 
2962 	/* Prevent send BD corruption. */
2963 	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2964 		u16 oldlnkctl, newlnkctl;
2965 
2966 		pci_read_config_word(tp->pdev,
2967 				     tp->pcie_cap + PCI_EXP_LNKCTL,
2968 				     &oldlnkctl);
2969 		if (tp->link_config.active_speed == SPEED_100 ||
2970 		    tp->link_config.active_speed == SPEED_10)
2971 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2972 		else
2973 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2974 		if (newlnkctl != oldlnkctl)
2975 			pci_write_config_word(tp->pdev,
2976 					      tp->pcie_cap + PCI_EXP_LNKCTL,
2977 					      newlnkctl);
2978 	}
2979 
2980 	if (current_link_up != netif_carrier_ok(tp->dev)) {
2981 		if (current_link_up)
2982 			netif_carrier_on(tp->dev);
2983 		else
2984 			netif_carrier_off(tp->dev);
2985 		tg3_link_report(tp);
2986 	}
2987 
2988 	return 0;
2989 }
2990 
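/* Software implementation of the 1000BASE-X autonegotiation state
 * machine (IEEE 802.3 clause 37).  The MAC exposes the raw /C/ ordered
 * sets through MAC_TX_AUTO_NEG and MAC_RX_AUTO_NEG, and
 * tg3_fiber_aneg_smachine() below walks through ability detect,
 * acknowledge detect, complete acknowledge and idle detect by hand.
 */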
2991 struct tg3_fiber_aneginfo {
2992 	int state;
2993 #define ANEG_STATE_UNKNOWN		0
2994 #define ANEG_STATE_AN_ENABLE		1
2995 #define ANEG_STATE_RESTART_INIT		2
2996 #define ANEG_STATE_RESTART		3
2997 #define ANEG_STATE_DISABLE_LINK_OK	4
2998 #define ANEG_STATE_ABILITY_DETECT_INIT	5
2999 #define ANEG_STATE_ABILITY_DETECT	6
3000 #define ANEG_STATE_ACK_DETECT_INIT	7
3001 #define ANEG_STATE_ACK_DETECT		8
3002 #define ANEG_STATE_COMPLETE_ACK_INIT	9
3003 #define ANEG_STATE_COMPLETE_ACK		10
3004 #define ANEG_STATE_IDLE_DETECT_INIT	11
3005 #define ANEG_STATE_IDLE_DETECT		12
3006 #define ANEG_STATE_LINK_OK		13
3007 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
3008 #define ANEG_STATE_NEXT_PAGE_WAIT	15
3009 
3010 	u32 flags;
3011 #define MR_AN_ENABLE		0x00000001
3012 #define MR_RESTART_AN		0x00000002
3013 #define MR_AN_COMPLETE		0x00000004
3014 #define MR_PAGE_RX		0x00000008
3015 #define MR_NP_LOADED		0x00000010
3016 #define MR_TOGGLE_TX		0x00000020
3017 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
3018 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
3019 #define MR_LP_ADV_SYM_PAUSE	0x00000100
3020 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
3021 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
3022 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
3023 #define MR_LP_ADV_NEXT_PAGE	0x00001000
3024 #define MR_TOGGLE_RX		0x00002000
3025 #define MR_NP_RX		0x00004000
3026 
3027 #define MR_LINK_OK		0x80000000
3028 
3029 	unsigned long link_time, cur_time;
3030 
3031 	u32 ability_match_cfg;
3032 	int ability_match_count;
3033 
3034 	char ability_match, idle_match, ack_match;
3035 
3036 	u32 txconfig, rxconfig;
3037 #define ANEG_CFG_NP		0x00000080
3038 #define ANEG_CFG_ACK		0x00000040
3039 #define ANEG_CFG_RF2		0x00000020
3040 #define ANEG_CFG_RF1		0x00000010
3041 #define ANEG_CFG_PS2		0x00000001
3042 #define ANEG_CFG_PS1		0x00008000
3043 #define ANEG_CFG_HD		0x00004000
3044 #define ANEG_CFG_FD		0x00002000
3045 #define ANEG_CFG_INVAL		0x00001f06
3046 
3047 };
3048 #define ANEG_OK		0
3049 #define ANEG_DONE	1
3050 #define ANEG_TIMER_ENAB	2
3051 #define ANEG_FAILED	-1
3052 
3053 #define ANEG_STATE_SETTLE_TIME	10000
3054 
3055 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3056 				   struct tg3_fiber_aneginfo *ap)
3057 {
3058 	u16 flowctrl;
3059 	unsigned long delta;
3060 	u32 rx_cfg_reg;
3061 	int ret;
3062 
3063 	if (ap->state == ANEG_STATE_UNKNOWN) {
3064 		ap->rxconfig = 0;
3065 		ap->link_time = 0;
3066 		ap->cur_time = 0;
3067 		ap->ability_match_cfg = 0;
3068 		ap->ability_match_count = 0;
3069 		ap->ability_match = 0;
3070 		ap->idle_match = 0;
3071 		ap->ack_match = 0;
3072 	}
3073 	ap->cur_time++;
3074 
3075 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3076 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3077 
3078 		if (rx_cfg_reg != ap->ability_match_cfg) {
3079 			ap->ability_match_cfg = rx_cfg_reg;
3080 			ap->ability_match = 0;
3081 			ap->ability_match_count = 0;
3082 		} else {
3083 			if (++ap->ability_match_count > 1) {
3084 				ap->ability_match = 1;
3085 				ap->ability_match_cfg = rx_cfg_reg;
3086 			}
3087 		}
3088 		if (rx_cfg_reg & ANEG_CFG_ACK)
3089 			ap->ack_match = 1;
3090 		else
3091 			ap->ack_match = 0;
3092 
3093 		ap->idle_match = 0;
3094 	} else {
3095 		ap->idle_match = 1;
3096 		ap->ability_match_cfg = 0;
3097 		ap->ability_match_count = 0;
3098 		ap->ability_match = 0;
3099 		ap->ack_match = 0;
3100 
3101 		rx_cfg_reg = 0;
3102 	}
3103 
3104 	ap->rxconfig = rx_cfg_reg;
3105 	ret = ANEG_OK;
3106 
3107 	switch (ap->state) {
3108 	case ANEG_STATE_UNKNOWN:
3109 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3110 			ap->state = ANEG_STATE_AN_ENABLE;
3111 
3112 		/* fallthru */
3113 	case ANEG_STATE_AN_ENABLE:
3114 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3115 		if (ap->flags & MR_AN_ENABLE) {
3116 			ap->link_time = 0;
3117 			ap->cur_time = 0;
3118 			ap->ability_match_cfg = 0;
3119 			ap->ability_match_count = 0;
3120 			ap->ability_match = 0;
3121 			ap->idle_match = 0;
3122 			ap->ack_match = 0;
3123 
3124 			ap->state = ANEG_STATE_RESTART_INIT;
3125 		} else {
3126 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
3127 		}
3128 		break;
3129 
3130 	case ANEG_STATE_RESTART_INIT:
3131 		ap->link_time = ap->cur_time;
3132 		ap->flags &= ~(MR_NP_LOADED);
3133 		ap->txconfig = 0;
3134 		tw32(MAC_TX_AUTO_NEG, 0);
3135 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3136 		tw32_f(MAC_MODE, tp->mac_mode);
3137 		udelay(40);
3138 
3139 		ret = ANEG_TIMER_ENAB;
3140 		ap->state = ANEG_STATE_RESTART;
3141 
3142 		/* fallthru */
3143 	case ANEG_STATE_RESTART:
3144 		delta = ap->cur_time - ap->link_time;
3145 		if (delta > ANEG_STATE_SETTLE_TIME) {
3146 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3147 		} else {
3148 			ret = ANEG_TIMER_ENAB;
3149 		}
3150 		break;
3151 
3152 	case ANEG_STATE_DISABLE_LINK_OK:
3153 		ret = ANEG_DONE;
3154 		break;
3155 
3156 	case ANEG_STATE_ABILITY_DETECT_INIT:
3157 		ap->flags &= ~(MR_TOGGLE_TX);
3158 		ap->txconfig = ANEG_CFG_FD;
3159 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3160 		if (flowctrl & ADVERTISE_1000XPAUSE)
3161 			ap->txconfig |= ANEG_CFG_PS1;
3162 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3163 			ap->txconfig |= ANEG_CFG_PS2;
3164 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3165 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3166 		tw32_f(MAC_MODE, tp->mac_mode);
3167 		udelay(40);
3168 
3169 		ap->state = ANEG_STATE_ABILITY_DETECT;
3170 		break;
3171 
3172 	case ANEG_STATE_ABILITY_DETECT:
3173 		if (ap->ability_match != 0 && ap->rxconfig != 0) {
3174 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
3175 		}
3176 		break;
3177 
3178 	case ANEG_STATE_ACK_DETECT_INIT:
3179 		ap->txconfig |= ANEG_CFG_ACK;
3180 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3181 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3182 		tw32_f(MAC_MODE, tp->mac_mode);
3183 		udelay(40);
3184 
3185 		ap->state = ANEG_STATE_ACK_DETECT;
3186 
3187 		/* fallthru */
3188 	case ANEG_STATE_ACK_DETECT:
3189 		if (ap->ack_match != 0) {
3190 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3191 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3192 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3193 			} else {
3194 				ap->state = ANEG_STATE_AN_ENABLE;
3195 			}
3196 		} else if (ap->ability_match != 0 &&
3197 			   ap->rxconfig == 0) {
3198 			ap->state = ANEG_STATE_AN_ENABLE;
3199 		}
3200 		break;
3201 
3202 	case ANEG_STATE_COMPLETE_ACK_INIT:
3203 		if (ap->rxconfig & ANEG_CFG_INVAL) {
3204 			ret = ANEG_FAILED;
3205 			break;
3206 		}
3207 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3208 			       MR_LP_ADV_HALF_DUPLEX |
3209 			       MR_LP_ADV_SYM_PAUSE |
3210 			       MR_LP_ADV_ASYM_PAUSE |
3211 			       MR_LP_ADV_REMOTE_FAULT1 |
3212 			       MR_LP_ADV_REMOTE_FAULT2 |
3213 			       MR_LP_ADV_NEXT_PAGE |
3214 			       MR_TOGGLE_RX |
3215 			       MR_NP_RX);
3216 		if (ap->rxconfig & ANEG_CFG_FD)
3217 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3218 		if (ap->rxconfig & ANEG_CFG_HD)
3219 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3220 		if (ap->rxconfig & ANEG_CFG_PS1)
3221 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
3222 		if (ap->rxconfig & ANEG_CFG_PS2)
3223 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3224 		if (ap->rxconfig & ANEG_CFG_RF1)
3225 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3226 		if (ap->rxconfig & ANEG_CFG_RF2)
3227 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3228 		if (ap->rxconfig & ANEG_CFG_NP)
3229 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
3230 
3231 		ap->link_time = ap->cur_time;
3232 
3233 		ap->flags ^= (MR_TOGGLE_TX);
3234 		if (ap->rxconfig & 0x0008)
3235 			ap->flags |= MR_TOGGLE_RX;
3236 		if (ap->rxconfig & ANEG_CFG_NP)
3237 			ap->flags |= MR_NP_RX;
3238 		ap->flags |= MR_PAGE_RX;
3239 
3240 		ap->state = ANEG_STATE_COMPLETE_ACK;
3241 		ret = ANEG_TIMER_ENAB;
3242 		break;
3243 
3244 	case ANEG_STATE_COMPLETE_ACK:
3245 		if (ap->ability_match != 0 &&
3246 		    ap->rxconfig == 0) {
3247 			ap->state = ANEG_STATE_AN_ENABLE;
3248 			break;
3249 		}
3250 		delta = ap->cur_time - ap->link_time;
3251 		if (delta > ANEG_STATE_SETTLE_TIME) {
3252 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3253 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3254 			} else {
3255 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3256 				    !(ap->flags & MR_NP_RX)) {
3257 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3258 				} else {
3259 					ret = ANEG_FAILED;
3260 				}
3261 			}
3262 		}
3263 		break;
3264 
3265 	case ANEG_STATE_IDLE_DETECT_INIT:
3266 		ap->link_time = ap->cur_time;
3267 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3268 		tw32_f(MAC_MODE, tp->mac_mode);
3269 		udelay(40);
3270 
3271 		ap->state = ANEG_STATE_IDLE_DETECT;
3272 		ret = ANEG_TIMER_ENAB;
3273 		break;
3274 
3275 	case ANEG_STATE_IDLE_DETECT:
3276 		if (ap->ability_match != 0 &&
3277 		    ap->rxconfig == 0) {
3278 			ap->state = ANEG_STATE_AN_ENABLE;
3279 			break;
3280 		}
3281 		delta = ap->cur_time - ap->link_time;
3282 		if (delta > ANEG_STATE_SETTLE_TIME) {
3283 			/* XXX another gem from the Broadcom driver :( */
3284 			ap->state = ANEG_STATE_LINK_OK;
3285 		}
3286 		break;
3287 
3288 	case ANEG_STATE_LINK_OK:
3289 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3290 		ret = ANEG_DONE;
3291 		break;
3292 
3293 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3294 		/* ??? unimplemented */
3295 		break;
3296 
3297 	case ANEG_STATE_NEXT_PAGE_WAIT:
3298 		/* ??? unimplemented */
3299 		break;
3300 
3301 	default:
3302 		ret = ANEG_FAILED;
3303 		break;
3304 	}
3305 
3306 	return ret;
3307 }
3308 
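/* Drive the clause 37 state machine to completion: each loop iteration
 * below advances the state machine once and then sleeps 1 us, so the
 * tick budget allows roughly 195 ms for autonegotiation to finish.
 */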
3309 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3310 {
3311 	int res = 0;
3312 	struct tg3_fiber_aneginfo aninfo;
3313 	int status = ANEG_FAILED;
3314 	unsigned int tick;
3315 	u32 tmp;
3316 
3317 	tw32_f(MAC_TX_AUTO_NEG, 0);
3318 
3319 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3320 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3321 	udelay(40);
3322 
3323 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3324 	udelay(40);
3325 
3326 	memset(&aninfo, 0, sizeof(aninfo));
3327 	aninfo.flags |= MR_AN_ENABLE;
3328 	aninfo.state = ANEG_STATE_UNKNOWN;
3329 	aninfo.cur_time = 0;
3330 	tick = 0;
3331 	while (++tick < 195000) {
3332 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
3333 		if (status == ANEG_DONE || status == ANEG_FAILED)
3334 			break;
3335 
3336 		udelay(1);
3337 	}
3338 
3339 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3340 	tw32_f(MAC_MODE, tp->mac_mode);
3341 	udelay(40);
3342 
3343 	*txflags = aninfo.txconfig;
3344 	*rxflags = aninfo.flags;
3345 
3346 	if (status == ANEG_DONE &&
3347 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3348 			     MR_LP_ADV_FULL_DUPLEX)))
3349 		res = 1;
3350 
3351 	return res;
3352 }
3353 
3354 static void tg3_init_bcm8002(struct tg3 *tp)
3355 {
3356 	u32 mac_status = tr32(MAC_STATUS);
3357 	int i;
3358 
3359 	/* Reset when initting first time or we have a link. */
3360 	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3361 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
3362 		return;
3363 
3364 	/* Set PLL lock range. */
3365 	tg3_writephy(tp, 0x16, 0x8007);
3366 
3367 	/* SW reset */
3368 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3369 
3370 	/* Wait for reset to complete. */
3371 	/* XXX schedule_timeout() ... */
3372 	for (i = 0; i < 500; i++)
3373 		udelay(10);
3374 
3375 	/* Config mode; select PMA/Ch 1 regs. */
3376 	tg3_writephy(tp, 0x10, 0x8411);
3377 
3378 	/* Enable auto-lock and comdet, select txclk for tx. */
3379 	tg3_writephy(tp, 0x11, 0x0a10);
3380 
3381 	tg3_writephy(tp, 0x18, 0x00a0);
3382 	tg3_writephy(tp, 0x16, 0x41ff);
3383 
3384 	/* Assert and deassert POR. */
3385 	tg3_writephy(tp, 0x13, 0x0400);
3386 	udelay(40);
3387 	tg3_writephy(tp, 0x13, 0x0000);
3388 
3389 	tg3_writephy(tp, 0x11, 0x0a50);
3390 	udelay(40);
3391 	tg3_writephy(tp, 0x11, 0x0a10);
3392 
3393 	/* Wait for signal to stabilize */
3394 	/* XXX schedule_timeout() ... */
3395 	for (i = 0; i < 15000; i++)
3396 		udelay(10);
3397 
3398 	/* Deselect the channel register so we can read the PHYID
3399 	 * later.
3400 	 */
3401 	tg3_writephy(tp, 0x10, 0x8011);
3402 }
3403 
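/* Fiber link setup using the SG-DIG block's hardware autonegotiation;
 * returns nonzero once the link is up.  On everything except the 5704
 * A0/A1 revisions, the signal pre-emphasis and voltage regulator bits
 * of MAC_SERDES_CFG must be preserved and rewritten around each
 * autoneg restart, hence the "workaround" path below.
 */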
3404 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3405 {
3406 	u16 flowctrl;
3407 	u32 sg_dig_ctrl, sg_dig_status;
3408 	u32 serdes_cfg, expected_sg_dig_ctrl;
3409 	int workaround, port_a;
3410 	int current_link_up;
3411 
3412 	serdes_cfg = 0;
3413 	expected_sg_dig_ctrl = 0;
3414 	workaround = 0;
3415 	port_a = 1;
3416 	current_link_up = 0;
3417 
3418 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3419 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3420 		workaround = 1;
3421 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3422 			port_a = 0;
3423 
3424 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
3425 		/* preserve bits 20-23 for voltage regulator */
3426 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3427 	}
3428 
3429 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
3430 
3431 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3432 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3433 			if (workaround) {
3434 				u32 val = serdes_cfg;
3435 
3436 				if (port_a)
3437 					val |= 0xc010000;
3438 				else
3439 					val |= 0x4010000;
3440 				tw32_f(MAC_SERDES_CFG, val);
3441 			}
3442 
3443 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3444 		}
3445 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
3446 			tg3_setup_flow_control(tp, 0, 0);
3447 			current_link_up = 1;
3448 		}
3449 		goto out;
3450 	}
3451 
3452 	/* Want auto-negotiation.  */
3453 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3454 
3455 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3456 	if (flowctrl & ADVERTISE_1000XPAUSE)
3457 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3458 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3459 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3460 
3461 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3462 		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3463 		    tp->serdes_counter &&
3464 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
3465 				    MAC_STATUS_RCVD_CFG)) ==
3466 		     MAC_STATUS_PCS_SYNCED)) {
3467 			tp->serdes_counter--;
3468 			current_link_up = 1;
3469 			goto out;
3470 		}
3471 restart_autoneg:
3472 		if (workaround)
3473 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3474 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3475 		udelay(5);
3476 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3477 
3478 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3479 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3480 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3481 				 MAC_STATUS_SIGNAL_DET)) {
3482 		sg_dig_status = tr32(SG_DIG_STATUS);
3483 		mac_status = tr32(MAC_STATUS);
3484 
3485 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3486 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
3487 			u32 local_adv = 0, remote_adv = 0;
3488 
3489 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3490 				local_adv |= ADVERTISE_1000XPAUSE;
3491 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3492 				local_adv |= ADVERTISE_1000XPSE_ASYM;
3493 
3494 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3495 				remote_adv |= LPA_1000XPAUSE;
3496 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3497 				remote_adv |= LPA_1000XPAUSE_ASYM;
3498 
3499 			tg3_setup_flow_control(tp, local_adv, remote_adv);
3500 			current_link_up = 1;
3501 			tp->serdes_counter = 0;
3502 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3503 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3504 			if (tp->serdes_counter)
3505 				tp->serdes_counter--;
3506 			else {
3507 				if (workaround) {
3508 					u32 val = serdes_cfg;
3509 
3510 					if (port_a)
3511 						val |= 0xc010000;
3512 					else
3513 						val |= 0x4010000;
3514 
3515 					tw32_f(MAC_SERDES_CFG, val);
3516 				}
3517 
3518 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3519 				udelay(40);
3520 
3521 				/* Link parallel detection: link is up
3522 				 * only if we have PCS_SYNC and are not
3523 				 * receiving config code words. */
3524 				mac_status = tr32(MAC_STATUS);
3525 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3526 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
3527 					tg3_setup_flow_control(tp, 0, 0);
3528 					current_link_up = 1;
3529 					tp->tg3_flags2 |=
3530 						TG3_FLG2_PARALLEL_DETECT;
3531 					tp->serdes_counter =
3532 						SERDES_PARALLEL_DET_TIMEOUT;
3533 				} else
3534 					goto restart_autoneg;
3535 			}
3536 		}
3537 	} else {
3538 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3539 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3540 	}
3541 
3542 out:
3543 	return current_link_up;
3544 }
3545 
3546 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3547 {
3548 	int current_link_up = 0;
3549 
3550 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3551 		goto out;
3552 
3553 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3554 		u32 txflags, rxflags;
3555 		int i;
3556 
3557 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
3558 			u32 local_adv = 0, remote_adv = 0;
3559 
3560 			if (txflags & ANEG_CFG_PS1)
3561 				local_adv |= ADVERTISE_1000XPAUSE;
3562 			if (txflags & ANEG_CFG_PS2)
3563 				local_adv |= ADVERTISE_1000XPSE_ASYM;
3564 
3565 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
3566 				remote_adv |= LPA_1000XPAUSE;
3567 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3568 				remote_adv |= LPA_1000XPAUSE_ASYM;
3569 
3570 			tg3_setup_flow_control(tp, local_adv, remote_adv);
3571 
3572 			current_link_up = 1;
3573 		}
3574 		for (i = 0; i < 30; i++) {
3575 			udelay(20);
3576 			tw32_f(MAC_STATUS,
3577 			       (MAC_STATUS_SYNC_CHANGED |
3578 				MAC_STATUS_CFG_CHANGED));
3579 			udelay(40);
3580 			if ((tr32(MAC_STATUS) &
3581 			     (MAC_STATUS_SYNC_CHANGED |
3582 			      MAC_STATUS_CFG_CHANGED)) == 0)
3583 				break;
3584 		}
3585 
3586 		mac_status = tr32(MAC_STATUS);
3587 		if (current_link_up == 0 &&
3588 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
3589 		    !(mac_status & MAC_STATUS_RCVD_CFG))
3590 			current_link_up = 1;
3591 	} else {
3592 		tg3_setup_flow_control(tp, 0, 0);
3593 
3594 		/* Forcing 1000FD link up. */
3595 		current_link_up = 1;
3596 
3597 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3598 		udelay(40);
3599 
3600 		tw32_f(MAC_MODE, tp->mac_mode);
3601 		udelay(40);
3602 	}
3603 
3604 out:
3605 	return current_link_up;
3606 }
3607 
3608 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3609 {
3610 	u32 orig_pause_cfg;
3611 	u16 orig_active_speed;
3612 	u8 orig_active_duplex;
3613 	u32 mac_status;
3614 	int current_link_up;
3615 	int i;
3616 
3617 	orig_pause_cfg = tp->link_config.active_flowctrl;
3618 	orig_active_speed = tp->link_config.active_speed;
3619 	orig_active_duplex = tp->link_config.active_duplex;
3620 
3621 	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3622 	    netif_carrier_ok(tp->dev) &&
3623 	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3624 		mac_status = tr32(MAC_STATUS);
3625 		mac_status &= (MAC_STATUS_PCS_SYNCED |
3626 			       MAC_STATUS_SIGNAL_DET |
3627 			       MAC_STATUS_CFG_CHANGED |
3628 			       MAC_STATUS_RCVD_CFG);
3629 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
3630 				   MAC_STATUS_SIGNAL_DET)) {
3631 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3632 					    MAC_STATUS_CFG_CHANGED));
3633 			return 0;
3634 		}
3635 	}
3636 
3637 	tw32_f(MAC_TX_AUTO_NEG, 0);
3638 
3639 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3640 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3641 	tw32_f(MAC_MODE, tp->mac_mode);
3642 	udelay(40);
3643 
3644 	if (tp->phy_id == PHY_ID_BCM8002)
3645 		tg3_init_bcm8002(tp);
3646 
3647 	/* Enable link change event even when serdes polling.  */
3648 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3649 	udelay(40);
3650 
3651 	current_link_up = 0;
3652 	mac_status = tr32(MAC_STATUS);
3653 
3654 	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3655 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3656 	else
3657 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3658 
3659 	tp->hw_status->status =
3660 		(SD_STATUS_UPDATED |
3661 		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3662 
3663 	for (i = 0; i < 100; i++) {
3664 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3665 				    MAC_STATUS_CFG_CHANGED));
3666 		udelay(5);
3667 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3668 					 MAC_STATUS_CFG_CHANGED |
3669 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3670 			break;
3671 	}
3672 
3673 	mac_status = tr32(MAC_STATUS);
3674 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3675 		current_link_up = 0;
3676 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3677 		    tp->serdes_counter == 0) {
3678 			tw32_f(MAC_MODE, (tp->mac_mode |
3679 					  MAC_MODE_SEND_CONFIGS));
3680 			udelay(1);
3681 			tw32_f(MAC_MODE, tp->mac_mode);
3682 		}
3683 	}
3684 
3685 	if (current_link_up == 1) {
3686 		tp->link_config.active_speed = SPEED_1000;
3687 		tp->link_config.active_duplex = DUPLEX_FULL;
3688 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
3689 				    LED_CTRL_LNKLED_OVERRIDE |
3690 				    LED_CTRL_1000MBPS_ON));
3691 	} else {
3692 		tp->link_config.active_speed = SPEED_INVALID;
3693 		tp->link_config.active_duplex = DUPLEX_INVALID;
3694 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
3695 				    LED_CTRL_LNKLED_OVERRIDE |
3696 				    LED_CTRL_TRAFFIC_OVERRIDE));
3697 	}
3698 
3699 	if (current_link_up != netif_carrier_ok(tp->dev)) {
3700 		if (current_link_up)
3701 			netif_carrier_on(tp->dev);
3702 		else
3703 			netif_carrier_off(tp->dev);
3704 		tg3_link_report(tp);
3705 	} else {
3706 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
3707 		if (orig_pause_cfg != now_pause_cfg ||
3708 		    orig_active_speed != tp->link_config.active_speed ||
3709 		    orig_active_duplex != tp->link_config.active_duplex)
3710 			tg3_link_report(tp);
3711 	}
3712 
3713 	return 0;
3714 }
3715 
3716 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3717 {
3718 	int current_link_up, err = 0;
3719 	u32 bmsr, bmcr;
3720 	u16 current_speed;
3721 	u8 current_duplex;
3722 	u32 local_adv, remote_adv;
3723 
3724 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3725 	tw32_f(MAC_MODE, tp->mac_mode);
3726 	udelay(40);
3727 
3728 	tw32(MAC_EVENT, 0);
3729 
3730 	tw32_f(MAC_STATUS,
3731 	     (MAC_STATUS_SYNC_CHANGED |
3732 	      MAC_STATUS_CFG_CHANGED |
3733 	      MAC_STATUS_MI_COMPLETION |
3734 	      MAC_STATUS_LNKSTATE_CHANGED));
3735 	udelay(40);
3736 
3737 	if (force_reset)
3738 		tg3_phy_reset(tp);
3739 
3740 	current_link_up = 0;
3741 	current_speed = SPEED_INVALID;
3742 	current_duplex = DUPLEX_INVALID;
3743 
3744 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3745 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3746 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3747 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3748 			bmsr |= BMSR_LSTATUS;
3749 		else
3750 			bmsr &= ~BMSR_LSTATUS;
3751 	}
3752 
3753 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3754 
3755 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3756 	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3757 		/* do nothing, just check for link up at the end */
3758 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3759 		u32 adv, new_adv;
3760 
3761 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3762 		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3763 				  ADVERTISE_1000XPAUSE |
3764 				  ADVERTISE_1000XPSE_ASYM |
3765 				  ADVERTISE_SLCT);
3766 
3767 		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3768 
3769 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3770 			new_adv |= ADVERTISE_1000XHALF;
3771 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3772 			new_adv |= ADVERTISE_1000XFULL;
3773 
3774 		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3775 			tg3_writephy(tp, MII_ADVERTISE, new_adv);
3776 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3777 			tg3_writephy(tp, MII_BMCR, bmcr);
3778 
3779 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3780 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3781 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3782 
3783 			return err;
3784 		}
3785 	} else {
3786 		u32 new_bmcr;
3787 
3788 		bmcr &= ~BMCR_SPEED1000;
3789 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3790 
3791 		if (tp->link_config.duplex == DUPLEX_FULL)
3792 			new_bmcr |= BMCR_FULLDPLX;
3793 
3794 		if (new_bmcr != bmcr) {
3795 			/* BMCR_SPEED1000 is a reserved bit that needs
3796 			 * to be set on write.
3797 			 */
3798 			new_bmcr |= BMCR_SPEED1000;
3799 
3800 			/* Force a linkdown */
3801 			if (netif_carrier_ok(tp->dev)) {
3802 				u32 adv;
3803 
3804 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3805 				adv &= ~(ADVERTISE_1000XFULL |
3806 					 ADVERTISE_1000XHALF |
3807 					 ADVERTISE_SLCT);
3808 				tg3_writephy(tp, MII_ADVERTISE, adv);
3809 				tg3_writephy(tp, MII_BMCR, bmcr |
3810 							   BMCR_ANRESTART |
3811 							   BMCR_ANENABLE);
3812 				udelay(10);
3813 				netif_carrier_off(tp->dev);
3814 			}
3815 			tg3_writephy(tp, MII_BMCR, new_bmcr);
3816 			bmcr = new_bmcr;
3817 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3818 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3819 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3820 			    ASIC_REV_5714) {
3821 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3822 					bmsr |= BMSR_LSTATUS;
3823 				else
3824 					bmsr &= ~BMSR_LSTATUS;
3825 			}
3826 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3827 		}
3828 	}
3829 
3830 	if (bmsr & BMSR_LSTATUS) {
3831 		current_speed = SPEED_1000;
3832 		current_link_up = 1;
3833 		if (bmcr & BMCR_FULLDPLX)
3834 			current_duplex = DUPLEX_FULL;
3835 		else
3836 			current_duplex = DUPLEX_HALF;
3837 
3838 		local_adv = 0;
3839 		remote_adv = 0;
3840 
3841 		if (bmcr & BMCR_ANENABLE) {
3842 			u32 common;
3843 
3844 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3845 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3846 			common = local_adv & remote_adv;
3847 			if (common & (ADVERTISE_1000XHALF |
3848 				      ADVERTISE_1000XFULL)) {
3849 				if (common & ADVERTISE_1000XFULL)
3850 					current_duplex = DUPLEX_FULL;
3851 				else
3852 					current_duplex = DUPLEX_HALF;
3853 			}
3854 			else
3855 				current_link_up = 0;
3856 		}
3857 	}
3858 
3859 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3860 		tg3_setup_flow_control(tp, local_adv, remote_adv);
3861 
3862 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3863 	if (tp->link_config.active_duplex == DUPLEX_HALF)
3864 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3865 
3866 	tw32_f(MAC_MODE, tp->mac_mode);
3867 	udelay(40);
3868 
3869 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3870 
3871 	tp->link_config.active_speed = current_speed;
3872 	tp->link_config.active_duplex = current_duplex;
3873 
3874 	if (current_link_up != netif_carrier_ok(tp->dev)) {
3875 		if (current_link_up)
3876 			netif_carrier_on(tp->dev);
3877 		else {
3878 			netif_carrier_off(tp->dev);
3879 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3880 		}
3881 		tg3_link_report(tp);
3882 	}
3883 	return err;
3884 }
3885 
3886 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3887 {
3888 	if (tp->serdes_counter) {
3889 		/* Give autoneg time to complete. */
3890 		tp->serdes_counter--;
3891 		return;
3892 	}
3893 	if (!netif_carrier_ok(tp->dev) &&
3894 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3895 		u32 bmcr;
3896 
3897 		tg3_readphy(tp, MII_BMCR, &bmcr);
3898 		if (bmcr & BMCR_ANENABLE) {
3899 			u32 phy1, phy2;
3900 
3901 			/* Select shadow register 0x1f */
3902 			tg3_writephy(tp, 0x1c, 0x7c00);
3903 			tg3_readphy(tp, 0x1c, &phy1);
3904 
3905 			/* Select expansion interrupt status register */
3906 			tg3_writephy(tp, 0x17, 0x0f01);
3907 			tg3_readphy(tp, 0x15, &phy2);
3908 			tg3_readphy(tp, 0x15, &phy2);
3909 
3910 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3911 				/* We have signal detect and not receiving
3912 				 * config code words, link is up by parallel
3913 				 * detection.
3914 				 */
3915 
3916 				bmcr &= ~BMCR_ANENABLE;
3917 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3918 				tg3_writephy(tp, MII_BMCR, bmcr);
3919 				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3920 			}
3921 		}
3922 	}
3923 	else if (netif_carrier_ok(tp->dev) &&
3924 		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3925 		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3926 		u32 phy2;
3927 
3928 		/* Select expansion interrupt status register */
3929 		tg3_writephy(tp, 0x17, 0x0f01);
3930 		tg3_readphy(tp, 0x15, &phy2);
3931 		if (phy2 & 0x20) {
3932 			u32 bmcr;
3933 
3934 			/* Config code words received, turn on autoneg. */
3935 			tg3_readphy(tp, MII_BMCR, &bmcr);
3936 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3937 
3938 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3939 
3940 		}
3941 	}
3942 }
3943 
3944 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3945 {
3946 	int err;
3947 
3948 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3949 		err = tg3_setup_fiber_phy(tp, force_reset);
3950 	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3951 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
3952 	} else {
3953 		err = tg3_setup_copper_phy(tp, force_reset);
3954 	}
3955 
3956 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3957 		u32 val, scale;
3958 
3959 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3960 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3961 			scale = 65;
3962 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3963 			scale = 6;
3964 		else
3965 			scale = 12;
3966 
3967 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3968 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3969 		tw32(GRC_MISC_CFG, val);
3970 	}
3971 
3972 	if (tp->link_config.active_speed == SPEED_1000 &&
3973 	    tp->link_config.active_duplex == DUPLEX_HALF)
3974 		tw32(MAC_TX_LENGTHS,
3975 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3976 		      (6 << TX_LENGTHS_IPG_SHIFT) |
3977 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3978 	else
3979 		tw32(MAC_TX_LENGTHS,
3980 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3981 		      (6 << TX_LENGTHS_IPG_SHIFT) |
3982 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3983 
3984 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3985 		if (netif_carrier_ok(tp->dev)) {
3986 			tw32(HOSTCC_STAT_COAL_TICKS,
3987 			     tp->coal.stats_block_coalesce_usecs);
3988 		} else {
3989 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
3990 		}
3991 	}
3992 
3993 	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3994 		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3995 		if (!netif_carrier_ok(tp->dev))
3996 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3997 			      tp->pwrmgmt_thresh;
3998 		else
3999 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4000 		tw32(PCIE_PWR_MGMT_THRESH, val);
4001 	}
4002 
4003 	return err;
4004 }
4005 
4006 /* This is called whenever we suspect that the system chipset is re-
4007  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4008  * is bogus tx completions. We try to recover by setting the
4009  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4010  * in the workqueue.
4011  */
4012 static void tg3_tx_recover(struct tg3 *tp)
4013 {
4014 	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4015 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
4016 
4017 	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4018 	       "mapped I/O cycles to the network device, attempting to "
4019 	       "recover. Please report the problem to the driver maintainer "
4020 	       "and include system chipset information.\n", tp->dev->name);
4021 
4022 	spin_lock(&tp->lock);
4023 	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4024 	spin_unlock(&tp->lock);
4025 }
4026 
4027 static inline u32 tg3_tx_avail(struct tg3 *tp)
4028 {
4029 	smp_mb();
4030 	return (tp->tx_pending -
4031 		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4032 }
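
/* Worked example for the math above (illustrative, not driver code):
 * assuming TG3_TX_RING_SIZE == 512 and tp->tx_pending == 511, a state
 * of tx_prod == 5 (after wraparound) and tx_cons == 500 gives
 *
 *	(5 - 500) & 511 == 17	descriptors still in flight,
 *	511 - 17        == 494	descriptors available.
 *
 * The unsigned subtraction is modulo 2^32, so masking with the
 * power-of-two ring size minus one yields the correct occupancy even
 * when tx_prod has wrapped past tx_cons.
 */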
4033 
4034 /* Tigon3 never reports partial packet sends.  So we do not
4035  * need special logic to handle SKBs that have not had all
4036  * of their frags sent yet, like SunGEM does.
4037  */
4038 static void tg3_tx(struct tg3 *tp)
4039 {
4040 	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4041 	u32 sw_idx = tp->tx_cons;
4042 
4043 	while (sw_idx != hw_idx) {
4044 		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4045 		struct sk_buff *skb = ri->skb;
4046 		int i, tx_bug = 0;
4047 
4048 		if (unlikely(skb == NULL)) {
4049 			tg3_tx_recover(tp);
4050 			return;
4051 		}
4052 
4053 		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4054 
4055 		ri->skb = NULL;
4056 
4057 		sw_idx = NEXT_TX(sw_idx);
4058 
4059 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4060 			ri = &tp->tx_buffers[sw_idx];
4061 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4062 				tx_bug = 1;
4063 			sw_idx = NEXT_TX(sw_idx);
4064 		}
4065 
4066 		dev_kfree_skb(skb);
4067 
4068 		if (unlikely(tx_bug)) {
4069 			tg3_tx_recover(tp);
4070 			return;
4071 		}
4072 	}
4073 
4074 	tp->tx_cons = sw_idx;
4075 
4076 	/* Need to make the tx_cons update visible to tg3_start_xmit()
4077 	 * before checking for netif_queue_stopped().  Without the
4078 	 * memory barrier, there is a small possibility that tg3_start_xmit()
4079 	 * will miss it and cause the queue to be stopped forever.
4080 	 */
4081 	smp_mb();
4082 
4083 	if (unlikely(netif_queue_stopped(tp->dev) &&
4084 		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4085 		netif_tx_lock(tp->dev);
4086 		if (netif_queue_stopped(tp->dev) &&
4087 		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4088 			netif_wake_queue(tp->dev);
4089 		netif_tx_unlock(tp->dev);
4090 	}
4091 }
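
/* A sketch of the barrier pairing relied upon above (illustrative):
 *
 *	reclaim path (here)		transmit path (tg3_start_xmit)
 *	-------------------		------------------------------
 *	tp->tx_cons = sw_idx;		netif_stop_queue(dev);
 *	smp_mb();			smp_mb();  (inside tg3_tx_avail)
 *	if (queue stopped &&		if (tg3_tx_avail(tp) > thresh)
 *	    avail > thresh)			netif_wake_queue(dev);
 *		wake queue;
 *
 * Each side publishes its own update before reading the other side's
 * state, so at least one of the two checks observes the condition that
 * keeps the queue running.
 */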
4092 
4093 /* Returns size of skb allocated or < 0 on error.
4094  *
4095  * We only need to fill in the address because the other members
4096  * of the RX descriptor are invariant, see tg3_init_rings.
4097  *
4098  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4099  * posting buffers we only dirty the first cache line of the RX
4100  * descriptor (containing the address).  Whereas for the RX status
4101  * buffers the cpu only reads the last cacheline of the RX descriptor
4102  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4103  */
4104 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4105 			    int src_idx, u32 dest_idx_unmasked)
4106 {
4107 	struct tg3_rx_buffer_desc *desc;
4108 	struct ring_info *map, *src_map;
4109 	struct sk_buff *skb;
4110 	dma_addr_t mapping;
4111 	int skb_size, dest_idx;
4112 
4113 	src_map = NULL;
4114 	switch (opaque_key) {
4115 	case RXD_OPAQUE_RING_STD:
4116 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4117 		desc = &tp->rx_std[dest_idx];
4118 		map = &tp->rx_std_buffers[dest_idx];
4119 		if (src_idx >= 0)
4120 			src_map = &tp->rx_std_buffers[src_idx];
4121 		skb_size = tp->rx_pkt_buf_sz;
4122 		break;
4123 
4124 	case RXD_OPAQUE_RING_JUMBO:
4125 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4126 		desc = &tp->rx_jumbo[dest_idx];
4127 		map = &tp->rx_jumbo_buffers[dest_idx];
4128 		if (src_idx >= 0)
4129 			src_map = &tp->rx_jumbo_buffers[src_idx];
4130 		skb_size = RX_JUMBO_PKT_BUF_SZ;
4131 		break;
4132 
4133 	default:
4134 		return -EINVAL;
4135 	}
4136 
4137 	/* Do not overwrite any of the map or rp information
4138 	 * until we are sure we can commit to a new buffer.
4139 	 *
4140 	 * Callers depend upon this behavior and assume that
4141 	 * we leave everything unchanged if we fail.
4142 	 */
4143 	skb = netdev_alloc_skb(tp->dev, skb_size);
4144 	if (skb == NULL)
4145 		return -ENOMEM;
4146 
4147 	skb_reserve(skb, tp->rx_offset);
4148 
4149 	mapping = pci_map_single(tp->pdev, skb->data,
4150 				 skb_size - tp->rx_offset,
4151 				 PCI_DMA_FROMDEVICE);
4152 
4153 	map->skb = skb;
4154 	pci_unmap_addr_set(map, mapping, mapping);
4155 
4156 	if (src_map != NULL)
4157 		src_map->skb = NULL;
4158 
4159 	desc->addr_hi = ((u64)mapping >> 32);
4160 	desc->addr_lo = ((u64)mapping & 0xffffffff);
4161 
4162 	return skb_size;
4163 }
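
/* Call-pattern sketch (assumptions noted): ring initialization posts
 * fresh buffers with no source slot to recycle, roughly
 *
 *	if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0)
 *		break;
 *
 * while the RX fast path passes the just-consumed slot as src_idx and,
 * if the allocation fails, recycles the old buffer back into the ring
 * via tg3_recycle_rx() instead of losing it.
 */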
4164 
4165 /* We only need to move over in the address because the other
4166  * members of the RX descriptor are invariant.  See notes above
4167  * tg3_alloc_rx_skb for full details.
4168  */
4169 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4170 			   int src_idx, u32 dest_idx_unmasked)
4171 {
4172 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4173 	struct ring_info *src_map, *dest_map;
4174 	int dest_idx;
4175 
4176 	switch (opaque_key) {
4177 	case RXD_OPAQUE_RING_STD:
4178 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4179 		dest_desc = &tp->rx_std[dest_idx];
4180 		dest_map = &tp->rx_std_buffers[dest_idx];
4181 		src_desc = &tp->rx_std[src_idx];
4182 		src_map = &tp->rx_std_buffers[src_idx];
4183 		break;
4184 
4185 	case RXD_OPAQUE_RING_JUMBO:
4186 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4187 		dest_desc = &tp->rx_jumbo[dest_idx];
4188 		dest_map = &tp->rx_jumbo_buffers[dest_idx];
4189 		src_desc = &tp->rx_jumbo[src_idx];
4190 		src_map = &tp->rx_jumbo_buffers[src_idx];
4191 		break;
4192 
4193 	default:
4194 		return;
4195 	}
4196 
4197 	dest_map->skb = src_map->skb;
4198 	pci_unmap_addr_set(dest_map, mapping,
4199 			   pci_unmap_addr(src_map, mapping));
4200 	dest_desc->addr_hi = src_desc->addr_hi;
4201 	dest_desc->addr_lo = src_desc->addr_lo;
4202 
4203 	src_map->skb = NULL;
4204 }
4205 
4206 #if TG3_VLAN_TAG_USED
4207 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4208 {
4209 	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4210 }
4211 #endif
4212 
4213 /* The RX ring scheme is composed of multiple rings which post fresh
4214  * buffers to the chip, and one special ring the chip uses to report
4215  * status back to the host.
4216  *
4217  * The special ring reports the status of received packets to the
4218  * host.  The chip does not write into the original descriptor the
4219  * RX buffer was obtained from.  The chip simply takes the original
4220  * descriptor as provided by the host, updates the status and length
4221  * field, then writes this into the next status ring entry.
4222  *
4223  * Each ring the host uses to post buffers to the chip is described
4224  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4225  * it is first placed into the on-chip ram.  When the packet's length
4226  * is known, it walks down the TG3_BDINFO entries to select the ring.
4227  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4228  * which is within the range of the new packet's length is chosen.
4229  *
4230  * The "separate ring for rx status" scheme may sound queer, but it makes
4231  * sense from a cache coherency perspective.  If only the host writes
4232  * to the buffer post rings, and only the chip writes to the rx status
4233  * rings, then cache lines never move beyond shared-modified state.
4234  * If both the host and chip were to write into the same ring, cache line
4235  * eviction could occur since both entities want it in an exclusive state.
4236  */
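
/* A rough picture of the flow just described (illustrative only):
 *
 *	host posts:   std ring ------\
 *	              jumbo ring -----+--> chip selects ring by MAXLEN,
 *	                              |    DMAs packet into host buffer
 *	chip reports: status ring <---/    and writes status + length
 *
 * The host writes only the posting rings and the chip writes only the
 * status ring, so no cache line is written by both parties.
 */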
4237 static int tg3_rx(struct tg3 *tp, int budget)
4238 {
4239 	u32 work_mask, rx_std_posted = 0;
4240 	u32 sw_idx = tp->rx_rcb_ptr;
4241 	u16 hw_idx;
4242 	int received;
4243 
4244 	hw_idx = tp->hw_status->idx[0].rx_producer;
4245 	/*
4246 	 * We need to order the read of hw_idx and the read of
4247 	 * the opaque cookie.
4248 	 */
4249 	rmb();
4250 	work_mask = 0;
4251 	received = 0;
4252 	while (sw_idx != hw_idx && budget > 0) {
4253 		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4254 		unsigned int len;
4255 		struct sk_buff *skb;
4256 		dma_addr_t dma_addr;
4257 		u32 opaque_key, desc_idx, *post_ptr;
4258 
4259 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4260 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4261 		if (opaque_key == RXD_OPAQUE_RING_STD) {
4262 			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4263 						  mapping);
4264 			skb = tp->rx_std_buffers[desc_idx].skb;
4265 			post_ptr = &tp->rx_std_ptr;
4266 			rx_std_posted++;
4267 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4268 			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4269 						  mapping);
4270 			skb = tp->rx_jumbo_buffers[desc_idx].skb;
4271 			post_ptr = &tp->rx_jumbo_ptr;
4272 		}
4273 		else {
4274 			goto next_pkt_nopost;
4275 		}
4276 
4277 		work_mask |= opaque_key;
4278 
4279 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4280 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4281 		drop_it:
4282 			tg3_recycle_rx(tp, opaque_key,
4283 				       desc_idx, *post_ptr);
4284 		drop_it_no_recycle:
4285 			/* Other statistics are tracked by the card. */
4286 			tp->net_stats.rx_dropped++;
4287 			goto next_pkt;
4288 		}
4289 
4290 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4291 		      ETH_FCS_LEN;
4292 
4293 		if (len > RX_COPY_THRESHOLD
4294 			&& tp->rx_offset == NET_IP_ALIGN
4295 			/* rx_offset will likely not equal NET_IP_ALIGN
4296 			 * if this is a 5701 card running in PCI-X mode
4297 			 * [see tg3_get_invariants()]
4298 			 */
4299 		) {
4300 			int skb_size;
4301 
4302 			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4303 						    desc_idx, *post_ptr);
4304 			if (skb_size < 0)
4305 				goto drop_it;
4306 
4307 			pci_unmap_single(tp->pdev, dma_addr,
4308 					 skb_size - tp->rx_offset,
4309 					 PCI_DMA_FROMDEVICE);
4310 
4311 			skb_put(skb, len);
4312 		} else {
4313 			struct sk_buff *copy_skb;
4314 
4315 			tg3_recycle_rx(tp, opaque_key,
4316 				       desc_idx, *post_ptr);
4317 
4318 			copy_skb = netdev_alloc_skb(tp->dev,
4319 						    len + TG3_RAW_IP_ALIGN);
4320 			if (copy_skb == NULL)
4321 				goto drop_it_no_recycle;
4322 
4323 			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4324 			skb_put(copy_skb, len);
4325 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4326 			skb_copy_from_linear_data(skb, copy_skb->data, len);
4327 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4328 
4329 			/* We'll reuse the original ring buffer. */
4330 			skb = copy_skb;
4331 		}
4332 
4333 		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4334 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4335 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4336 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
4337 			skb->ip_summed = CHECKSUM_UNNECESSARY;
4338 		else
4339 			skb->ip_summed = CHECKSUM_NONE;
4340 
4341 		skb->protocol = eth_type_trans(skb, tp->dev);
4342 #if TG3_VLAN_TAG_USED
4343 		if (tp->vlgrp != NULL &&
4344 		    desc->type_flags & RXD_FLAG_VLAN) {
4345 			tg3_vlan_rx(tp, skb,
4346 				    desc->err_vlan & RXD_VLAN_MASK);
4347 		} else
4348 #endif
4349 			netif_receive_skb(skb);
4350 
4351 		received++;
4352 		budget--;
4353 
4354 next_pkt:
4355 		(*post_ptr)++;
4356 
4357 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4358 			u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4359 
4360 			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4361 				     TG3_64BIT_REG_LOW, idx);
4362 			work_mask &= ~RXD_OPAQUE_RING_STD;
4363 			rx_std_posted = 0;
4364 		}
4365 next_pkt_nopost:
4366 		sw_idx++;
4367 		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4368 
4369 		/* Refresh hw_idx to see if there is new work */
4370 		if (sw_idx == hw_idx) {
4371 			hw_idx = tp->hw_status->idx[0].rx_producer;
4372 			rmb();
4373 		}
4374 	}
4375 
4376 	/* ACK the status ring. */
4377 	tp->rx_rcb_ptr = sw_idx;
4378 	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4379 
4380 	/* Refill RX ring(s). */
4381 	if (work_mask & RXD_OPAQUE_RING_STD) {
4382 		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4383 		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4384 			     sw_idx);
4385 	}
4386 	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4387 		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4388 		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4389 			     sw_idx);
4390 	}
4391 	mmiowb();
4392 
4393 	return received;
4394 }
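
/* Decision sketch for the copy-vs-flip logic above (illustrative):
 *
 *	if (len > RX_COPY_THRESHOLD && rx_offset == NET_IP_ALIGN)
 *		flip:  hand the DMA buffer to the stack, post a new one;
 *	else
 *		copy:  copy into a freshly allocated small skb and
 *		       recycle the original DMA buffer in place.
 *
 * Copying small frames avoids burning a full-sized replacement buffer
 * on every tiny packet.
 */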
4395 
4396 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4397 {
4398 	struct tg3_hw_status *sblk = tp->hw_status;
4399 
4400 	/* handle link change and other phy events */
4401 	if (!(tp->tg3_flags &
4402 	      (TG3_FLAG_USE_LINKCHG_REG |
4403 	       TG3_FLAG_POLL_SERDES))) {
4404 		if (sblk->status & SD_STATUS_LINK_CHG) {
4405 			sblk->status = SD_STATUS_UPDATED |
4406 				(sblk->status & ~SD_STATUS_LINK_CHG);
4407 			spin_lock(&tp->lock);
4408 			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4409 				tw32_f(MAC_STATUS,
4410 				     (MAC_STATUS_SYNC_CHANGED |
4411 				      MAC_STATUS_CFG_CHANGED |
4412 				      MAC_STATUS_MI_COMPLETION |
4413 				      MAC_STATUS_LNKSTATE_CHANGED));
4414 				udelay(40);
4415 			} else
4416 				tg3_setup_phy(tp, 0);
4417 			spin_unlock(&tp->lock);
4418 		}
4419 	}
4420 
4421 	/* run TX completion thread */
4422 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4423 		tg3_tx(tp);
4424 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4425 			return work_done;
4426 	}
4427 
4428 	/* run RX thread, within the bounds set by NAPI.
4429 	 * All RX "locking" is done by ensuring outside
4430 	 * code synchronizes with tg3->napi.poll()
4431 	 */
4432 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4433 		work_done += tg3_rx(tp, budget - work_done);
4434 
4435 	return work_done;
4436 }
4437 
4438 static int tg3_poll(struct napi_struct *napi, int budget)
4439 {
4440 	struct tg3 *tp = container_of(napi, struct tg3, napi);
4441 	int work_done = 0;
4442 	struct tg3_hw_status *sblk = tp->hw_status;
4443 
4444 	while (1) {
4445 		work_done = tg3_poll_work(tp, work_done, budget);
4446 
4447 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4448 			goto tx_recovery;
4449 
4450 		if (unlikely(work_done >= budget))
4451 			break;
4452 
4453 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4454 			/* tp->last_tag is used in tg3_restart_ints() below
4455 			 * to tell the hw how much work has been processed,
4456 			 * so we must read it before checking for more work.
4457 			 */
4458 			tp->last_tag = sblk->status_tag;
4459 			rmb();
4460 		} else
4461 			sblk->status &= ~SD_STATUS_UPDATED;
4462 
4463 		if (likely(!tg3_has_work(tp))) {
4464 			netif_rx_complete(napi);
4465 			tg3_restart_ints(tp);
4466 			break;
4467 		}
4468 	}
4469 
4470 	return work_done;
4471 
4472 tx_recovery:
4473 	/* work_done is guaranteed to be less than budget. */
4474 	netif_rx_complete(napi);
4475 	schedule_work(&tp->reset_task);
4476 	return work_done;
4477 }
4478 
4479 static void tg3_irq_quiesce(struct tg3 *tp)
4480 {
4481 	BUG_ON(tp->irq_sync);
4482 
4483 	tp->irq_sync = 1;
4484 	smp_mb();
4485 
4486 	synchronize_irq(tp->pdev->irq);
4487 }
4488 
4489 static inline int tg3_irq_sync(struct tg3 *tp)
4490 {
4491 	return tp->irq_sync;
4492 }
4493 
4494 /* Fully shut down all tg3 driver activity elsewhere in the system.
4495  * If irq_sync is non-zero, the IRQ handler is synchronized with as
4496  * well.  This is usually unnecessary except when shutting down the
4497  * device.
4498  */
4499 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4500 {
4501 	spin_lock_bh(&tp->lock);
4502 	if (irq_sync)
4503 		tg3_irq_quiesce(tp);
4504 }
4505 
4506 static inline void tg3_full_unlock(struct tg3 *tp)
4507 {
4508 	spin_unlock_bh(&tp->lock);
4509 }
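
/* Usage sketch, assuming a typical configuration-path caller:
 *
 *	tg3_full_lock(tp, 1);		(also quiesces the IRQ handler)
 *	... halt, reprogram and restart the chip ...
 *	tg3_full_unlock(tp);
 *
 * Fast-path callers pass irq_sync == 0 and skip the expensive
 * synchronize_irq() step.
 */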
4510 
4511 /* One-shot MSI handler - Chip automatically disables interrupt
4512  * after sending MSI so driver doesn't have to do it.
4513  */
4514 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4515 {
4516 	struct net_device *dev = dev_id;
4517 	struct tg3 *tp = netdev_priv(dev);
4518 
4519 	prefetch(tp->hw_status);
4520 	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4521 
4522 	if (likely(!tg3_irq_sync(tp)))
4523 		netif_rx_schedule(&tp->napi);
4524 
4525 	return IRQ_HANDLED;
4526 }
4527 
4528 /* MSI ISR - No need to check for interrupt sharing and no need to
4529  * flush status block and interrupt mailbox. PCI ordering rules
4530  * guarantee that MSI will arrive after the status block.
4531  */
4532 static irqreturn_t tg3_msi(int irq, void *dev_id)
4533 {
4534 	struct net_device *dev = dev_id;
4535 	struct tg3 *tp = netdev_priv(dev);
4536 
4537 	prefetch(tp->hw_status);
4538 	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4539 	/*
4540 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
4541 	 * chip-internal interrupt pending events.
4542 	 * Writing non-zero to intr-mbox-0 additionally tells the
4543 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
4544 	 * event coalescing.
4545 	 */
4546 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4547 	if (likely(!tg3_irq_sync(tp)))
4548 		netif_rx_schedule(&tp->napi);
4549 
4550 	return IRQ_RETVAL(1);
4551 }
4552 
4553 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4554 {
4555 	struct net_device *dev = dev_id;
4556 	struct tg3 *tp = netdev_priv(dev);
4557 	struct tg3_hw_status *sblk = tp->hw_status;
4558 	unsigned int handled = 1;
4559 
4560 	/* In INTx mode, it is possible for the interrupt to arrive at the
4561 	 * CPU before the status block posted prior to the interrupt lands.
4562 	 * Reading the PCI State register will confirm whether the
4563 	 * interrupt is ours and will flush the status block.
4564 	 */
4565 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4566 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4567 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4568 			handled = 0;
4569 			goto out;
4570 		}
4571 	}
4572 
4573 	/*
4574 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
4575 	 * chip-internal interrupt pending events.
4576 	 * Writing non-zero to intr-mbox-0 additionally tells the
4577 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
4578 	 * event coalescing.
4579 	 *
4580 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
4581 	 * spurious interrupts.  The flush impacts performance but
4582 	 * excessive spurious interrupts can be worse in some cases.
4583 	 */
4584 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4585 	if (tg3_irq_sync(tp))
4586 		goto out;
4587 	sblk->status &= ~SD_STATUS_UPDATED;
4588 	if (likely(tg3_has_work(tp))) {
4589 		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4590 		netif_rx_schedule(&tp->napi);
4591 	} else {
4592 		/* No work, shared interrupt perhaps?  re-enable
4593 		 * interrupts, and flush that PCI write
4594 		 */
4595 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4596 			       0x00000000);
4597 	}
4598 out:
4599 	return IRQ_RETVAL(handled);
4600 }
4601 
4602 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4603 {
4604 	struct net_device *dev = dev_id;
4605 	struct tg3 *tp = netdev_priv(dev);
4606 	struct tg3_hw_status *sblk = tp->hw_status;
4607 	unsigned int handled = 1;
4608 
4609 	/* In INTx mode, it is possible for the interrupt to arrive at the
4610 	 * CPU before the status block posted prior to the interrupt lands.
4611 	 * Reading the PCI State register will confirm whether the
4612 	 * interrupt is ours and will flush the status block.
4613 	 */
4614 	if (unlikely(sblk->status_tag == tp->last_tag)) {
4615 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4616 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4617 			handled = 0;
4618 			goto out;
4619 		}
4620 	}
4621 
4622 	/*
4623 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
4624 	 * chip-internal interrupt pending events.
4625 	 * Writing non-zero to intr-mbox-0 additionally tells the
4626 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
4627 	 * event coalescing.
4628 	 *
4629 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
4630 	 * spurious interrupts.  The flush impacts performance but
4631 	 * excessive spurious interrupts can be worse in some cases.
4632 	 */
4633 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4634 	if (tg3_irq_sync(tp))
4635 		goto out;
4636 	if (netif_rx_schedule_prep(&tp->napi)) {
4637 		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4638 		/* Update last_tag to mark that this status has been
4639 		 * seen. Because interrupt may be shared, we may be
4640 		 * racing with tg3_poll(), so only update last_tag
4641 		 * if tg3_poll() is not scheduled.
4642 		 */
4643 		tp->last_tag = sblk->status_tag;
4644 		__netif_rx_schedule(&tp->napi);
4645 	}
4646 out:
4647 	return IRQ_RETVAL(handled);
4648 }
4649 
4650 /* ISR for interrupt test */
4651 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4652 {
4653 	struct net_device *dev = dev_id;
4654 	struct tg3 *tp = netdev_priv(dev);
4655 	struct tg3_hw_status *sblk = tp->hw_status;
4656 
4657 	if ((sblk->status & SD_STATUS_UPDATED) ||
4658 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4659 		tg3_disable_ints(tp);
4660 		return IRQ_RETVAL(1);
4661 	}
4662 	return IRQ_RETVAL(0);
4663 }
4664 
4665 static int tg3_init_hw(struct tg3 *, int);
4666 static int tg3_halt(struct tg3 *, int, int);
4667 
4668 /* Restart hardware after configuration changes, self-test, etc.
4669  * Invoked with tp->lock held.
4670  */
4671 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4672 	__releases(tp->lock)
4673 	__acquires(tp->lock)
4674 {
4675 	int err;
4676 
4677 	err = tg3_init_hw(tp, reset_phy);
4678 	if (err) {
4679 		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4680 		       "aborting.\n", tp->dev->name);
4681 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4682 		tg3_full_unlock(tp);
4683 		del_timer_sync(&tp->timer);
4684 		tp->irq_sync = 0;
4685 		napi_enable(&tp->napi);
4686 		dev_close(tp->dev);
4687 		tg3_full_lock(tp, 0);
4688 	}
4689 	return err;
4690 }
4691 
4692 #ifdef CONFIG_NET_POLL_CONTROLLER
4693 static void tg3_poll_controller(struct net_device *dev)
4694 {
4695 	struct tg3 *tp = netdev_priv(dev);
4696 
4697 	tg3_interrupt(tp->pdev->irq, dev);
4698 }
4699 #endif
4700 
4701 static void tg3_reset_task(struct work_struct *work)
4702 {
4703 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
4704 	int err;
4705 	unsigned int restart_timer;
4706 
4707 	tg3_full_lock(tp, 0);
4708 
4709 	if (!netif_running(tp->dev)) {
4710 		tg3_full_unlock(tp);
4711 		return;
4712 	}
4713 
4714 	tg3_full_unlock(tp);
4715 
4716 	tg3_phy_stop(tp);
4717 
4718 	tg3_netif_stop(tp);
4719 
4720 	tg3_full_lock(tp, 1);
4721 
4722 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4723 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4724 
4725 	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4726 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
4727 		tp->write32_rx_mbox = tg3_write_flush_reg32;
4728 		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4729 		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4730 	}
4731 
4732 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4733 	err = tg3_init_hw(tp, 1);
4734 	if (err)
4735 		goto out;
4736 
4737 	tg3_netif_start(tp);
4738 
4739 	if (restart_timer)
4740 		mod_timer(&tp->timer, jiffies + 1);
4741 
4742 out:
4743 	tg3_full_unlock(tp);
4744 
4745 	if (!err)
4746 		tg3_phy_start(tp);
4747 }
4748 
4749 static void tg3_dump_short_state(struct tg3 *tp)
4750 {
4751 	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4752 	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4753 	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4754 	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4755 }
4756 
4757 static void tg3_tx_timeout(struct net_device *dev)
4758 {
4759 	struct tg3 *tp = netdev_priv(dev);
4760 
4761 	if (netif_msg_tx_err(tp)) {
4762 		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4763 		       dev->name);
4764 		tg3_dump_short_state(tp);
4765 	}
4766 
4767 	schedule_work(&tp->reset_task);
4768 }
4769 
4770 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
4771 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4772 {
4773 	u32 base = (u32) mapping & 0xffffffff;
4774 
4775 	return ((base > 0xffffdcc0) &&
4776 		(base + len + 8 < base));
4777 }
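
/* Worked example: mapping == 0xffffff00 and len == 0x200 gives
 * base == 0xffffff00 > 0xffffdcc0, and base + len + 8 wraps to 0x108
 * in 32 bits, which is < base, so the buffer straddles a 4GB boundary
 * and must take the workaround path below.
 */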
4778 
4779 /* Test for DMA addresses > 40-bit */
4780 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4781 					  int len)
4782 {
4783 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4784 	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4785 		return (((u64) mapping + len) > DMA_40BIT_MASK);
4786 	return 0;
4787 #else
4788 	return 0;
4789 #endif
4790 }
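
/* Worked example (illustrative): with TG3_FLAG_40BIT_DMA_BUG set,
 * mapping == 0xfffffff000ULL and len == 0x2000 give
 * mapping + len == 0x10000001000ULL, which exceeds DMA_40BIT_MASK
 * (0xffffffffffULL), so the buffer must be bounced below 40 bits by
 * tigon3_dma_hwbug_workaround().
 */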
4791 
4792 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4793 
4794 /* Work around 4GB and 40-bit hardware DMA bugs. */
4795 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4796 				       u32 last_plus_one, u32 *start,
4797 				       u32 base_flags, u32 mss)
4798 {
4799 	struct sk_buff *new_skb;
4800 	dma_addr_t new_addr = 0;
4801 	u32 entry = *start;
4802 	int i, ret = 0;
4803 
4804 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4805 		new_skb = skb_copy(skb, GFP_ATOMIC);
4806 	else {
4807 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
4808 
4809 		new_skb = skb_copy_expand(skb,
4810 					  skb_headroom(skb) + more_headroom,
4811 					  skb_tailroom(skb), GFP_ATOMIC);
4812 	}
4813 
4814 	if (!new_skb) {
4815 		ret = -1;
4816 	} else {
4817 		/* New SKB is guaranteed to be linear. */
4818 		entry = *start;
4819 		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4820 		new_addr = skb_shinfo(new_skb)->dma_maps[0];
4821 
4822 		/* Make sure new skb does not cross any 4G boundaries.
4823 		 * Drop the packet if it does.
4824 		 */
4825 		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4826 			if (!ret)
4827 				skb_dma_unmap(&tp->pdev->dev, new_skb,
4828 					      DMA_TO_DEVICE);
4829 			ret = -1;
4830 			dev_kfree_skb(new_skb);
4831 			new_skb = NULL;
4832 		} else {
4833 			tg3_set_txd(tp, entry, new_addr, new_skb->len,
4834 				    base_flags, 1 | (mss << 1));
4835 			*start = NEXT_TX(entry);
4836 		}
4837 	}
4838 
4839 	/* Now clean up the sw ring entries. */
4840 	i = 0;
4841 	while (entry != last_plus_one) {
4842 		if (i == 0) {
4843 			tp->tx_buffers[entry].skb = new_skb;
4844 		} else {
4845 			tp->tx_buffers[entry].skb = NULL;
4846 		}
4847 		entry = NEXT_TX(entry);
4848 		i++;
4849 	}
4850 
4851 	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4852 	dev_kfree_skb(skb);
4853 
4854 	return ret;
4855 }
4856 
4857 static void tg3_set_txd(struct tg3 *tp, int entry,
4858 			dma_addr_t mapping, int len, u32 flags,
4859 			u32 mss_and_is_end)
4860 {
4861 	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4862 	int is_end = (mss_and_is_end & 0x1);
4863 	u32 mss = (mss_and_is_end >> 1);
4864 	u32 vlan_tag = 0;
4865 
4866 	if (is_end)
4867 		flags |= TXD_FLAG_END;
4868 	if (flags & TXD_FLAG_VLAN) {
4869 		vlan_tag = flags >> 16;
4870 		flags &= 0xffff;
4871 	}
4872 	vlan_tag |= (mss << TXD_MSS_SHIFT);
4873 
4874 	txd->addr_hi = ((u64) mapping >> 32);
4875 	txd->addr_lo = ((u64) mapping & 0xffffffff);
4876 	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4877 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4878 }
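
/* Encoding sketch: the mss_and_is_end argument packs two values as
 * (mss << 1) | is_end, so a caller marking the last descriptor of a
 * TSO packet with an MSS of 1460 would pass (1460 << 1) | 1.  The
 * callers below build exactly this form with expressions such as
 * (i == last) | (mss << 1).
 */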
4879 
4880 /* hard_start_xmit for devices that don't have any bugs and
4881  * support TG3_FLG2_HW_TSO_2 only.
4882  */
4883 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4884 {
4885 	struct tg3 *tp = netdev_priv(dev);
4886 	u32 len, entry, base_flags, mss;
4887 	struct skb_shared_info *sp;
4888 	dma_addr_t mapping;
4889 
4890 	len = skb_headlen(skb);
4891 
4892 	/* We are running in BH disabled context with netif_tx_lock
4893 	 * and TX reclaim runs via tp->napi.poll inside of a software
4894 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
4895 	 * no IRQ context deadlocks to worry about either.  Rejoice!
4896 	 */
4897 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4898 		if (!netif_queue_stopped(dev)) {
4899 			netif_stop_queue(dev);
4900 
4901 			/* This is a hard error, log it. */
4902 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4903 			       "queue awake!\n", dev->name);
4904 		}
4905 		return NETDEV_TX_BUSY;
4906 	}
4907 
4908 	entry = tp->tx_prod;
4909 	base_flags = 0;
4910 	mss = 0;
4911 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4912 		int tcp_opt_len, ip_tcp_len;
4913 
4914 		if (skb_header_cloned(skb) &&
4915 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4916 			dev_kfree_skb(skb);
4917 			goto out_unlock;
4918 		}
4919 
4920 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4921 			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4922 		else {
4923 			struct iphdr *iph = ip_hdr(skb);
4924 
4925 			tcp_opt_len = tcp_optlen(skb);
4926 			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4927 
4928 			iph->check = 0;
4929 			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4930 			mss |= (ip_tcp_len + tcp_opt_len) << 9;
4931 		}
4932 
4933 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4934 			       TXD_FLAG_CPU_POST_DMA);
4935 
4936 		tcp_hdr(skb)->check = 0;
4937 
4938 	}
4939 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
4940 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
4941 #if TG3_VLAN_TAG_USED
4942 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4943 		base_flags |= (TXD_FLAG_VLAN |
4944 			       (vlan_tx_tag_get(skb) << 16));
4945 #endif
4946 
4947 	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4948 		dev_kfree_skb(skb);
4949 		goto out_unlock;
4950 	}
4951 
4952 	sp = skb_shinfo(skb);
4953 
4954 	mapping = sp->dma_maps[0];
4955 
4956 	tp->tx_buffers[entry].skb = skb;
4957 
4958 	tg3_set_txd(tp, entry, mapping, len, base_flags,
4959 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4960 
4961 	entry = NEXT_TX(entry);
4962 
4963 	/* Now loop through additional data fragments, and queue them. */
4964 	if (skb_shinfo(skb)->nr_frags > 0) {
4965 		unsigned int i, last;
4966 
4967 		last = skb_shinfo(skb)->nr_frags - 1;
4968 		for (i = 0; i <= last; i++) {
4969 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970 
4971 			len = frag->size;
4972 			mapping = sp->dma_maps[i + 1];
4973 			tp->tx_buffers[entry].skb = NULL;
4974 
4975 			tg3_set_txd(tp, entry, mapping, len,
4976 				    base_flags, (i == last) | (mss << 1));
4977 
4978 			entry = NEXT_TX(entry);
4979 		}
4980 	}
4981 
4982 	/* Packets are ready, update Tx producer idx local and on card. */
4983 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4984 
4985 	tp->tx_prod = entry;
4986 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4987 		netif_stop_queue(dev);
4988 		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4989 			netif_wake_queue(tp->dev);
4990 	}
4991 
4992 out_unlock:
4993 	mmiowb();
4994 
4995 	dev->trans_start = jiffies;
4996 
4997 	return NETDEV_TX_OK;
4998 }
4999 
5000 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5001 
5002 /* Use GSO to work around a rare TSO bug that may be triggered when the
5003  * TSO header is greater than 80 bytes.
5004  */
5005 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5006 {
5007 	struct sk_buff *segs, *nskb;
5008 
5009 	/* Estimate the number of fragments in the worst case */
5010 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5011 		netif_stop_queue(tp->dev);
5012 		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5013 			return NETDEV_TX_BUSY;
5014 
5015 		netif_wake_queue(tp->dev);
5016 	}
5017 
5018 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5019 	if (IS_ERR(segs))
5020 		goto tg3_tso_bug_end;
5021 
5022 	do {
5023 		nskb = segs;
5024 		segs = segs->next;
5025 		nskb->next = NULL;
5026 		tg3_start_xmit_dma_bug(nskb, tp->dev);
5027 	} while (segs);
5028 
5029 tg3_tso_bug_end:
5030 	dev_kfree_skb(skb);
5031 
5032 	return NETDEV_TX_OK;
5033 }
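
/* Budgeting note for the "gso_segs * 3" estimate above: each software
 * segment produced by skb_gso_segment() needs at least one descriptor
 * and may need a couple more for paged data, so three per segment is a
 * conservative worst-case guess rather than an exact count.
 */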
5034 
5035 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5036  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5037  */
5038 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5039 {
5040 	struct tg3 *tp = netdev_priv(dev);
5041 	u32 len, entry, base_flags, mss;
5042 	struct skb_shared_info *sp;
5043 	int would_hit_hwbug;
5044 	dma_addr_t mapping;
5045 
5046 	len = skb_headlen(skb);
5047 
5048 	/* We are running in BH disabled context with netif_tx_lock
5049 	 * and TX reclaim runs via tp->napi.poll inside of a software
5050 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
5051 	 * no IRQ context deadlocks to worry about either.  Rejoice!
5052 	 */
5053 	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5054 		if (!netif_queue_stopped(dev)) {
5055 			netif_stop_queue(dev);
5056 
5057 			/* This is a hard error, log it. */
5058 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5059 			       "queue awake!\n", dev->name);
5060 		}
5061 		return NETDEV_TX_BUSY;
5062 	}
5063 
5064 	entry = tp->tx_prod;
5065 	base_flags = 0;
5066 	if (skb->ip_summed == CHECKSUM_PARTIAL)
5067 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
5068 	mss = 0;
5069 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5070 		struct iphdr *iph;
5071 		int tcp_opt_len, ip_tcp_len, hdr_len;
5072 
5073 		if (skb_header_cloned(skb) &&
5074 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5075 			dev_kfree_skb(skb);
5076 			goto out_unlock;
5077 		}
5078 
5079 		tcp_opt_len = tcp_optlen(skb);
5080 		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5081 
5082 		hdr_len = ip_tcp_len + tcp_opt_len;
5083 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5084 			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5085 			return (tg3_tso_bug(tp, skb));
5086 
5087 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5088 			       TXD_FLAG_CPU_POST_DMA);
5089 
5090 		iph = ip_hdr(skb);
5091 		iph->check = 0;
5092 		iph->tot_len = htons(mss + hdr_len);
5093 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5094 			tcp_hdr(skb)->check = 0;
5095 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5096 		} else
5097 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5098 								 iph->daddr, 0,
5099 								 IPPROTO_TCP,
5100 								 0);
5101 
5102 		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5103 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5104 			if (tcp_opt_len || iph->ihl > 5) {
5105 				int tsflags;
5106 
5107 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5108 				mss |= (tsflags << 11);
5109 			}
5110 		} else {
5111 			if (tcp_opt_len || iph->ihl > 5) {
5112 				int tsflags;
5113 
5114 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5115 				base_flags |= tsflags << 12;
5116 			}
5117 		}
5118 	}
5119 #if TG3_VLAN_TAG_USED
5120 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5121 		base_flags |= (TXD_FLAG_VLAN |
5122 			       (vlan_tx_tag_get(skb) << 16));
5123 #endif
5124 
5125 	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5126 		dev_kfree_skb(skb);
5127 		goto out_unlock;
5128 	}
5129 
5130 	sp = skb_shinfo(skb);
5131 
5132 	mapping = sp->dma_maps[0];
5133 
5134 	tp->tx_buffers[entry].skb = skb;
5135 
5136 	would_hit_hwbug = 0;
5137 
5138 	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5139 		would_hit_hwbug = 1;
5140 	else if (tg3_4g_overflow_test(mapping, len))
5141 		would_hit_hwbug = 1;
5142 
5143 	tg3_set_txd(tp, entry, mapping, len, base_flags,
5144 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5145 
5146 	entry = NEXT_TX(entry);
5147 
5148 	/* Now loop through additional data fragments, and queue them. */
5149 	if (skb_shinfo(skb)->nr_frags > 0) {
5150 		unsigned int i, last;
5151 
5152 		last = skb_shinfo(skb)->nr_frags - 1;
5153 		for (i = 0; i <= last; i++) {
5154 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5155 
5156 			len = frag->size;
5157 			mapping = sp->dma_maps[i + 1];
5158 
5159 			tp->tx_buffers[entry].skb = NULL;
5160 
5161 			if (tg3_4g_overflow_test(mapping, len))
5162 				would_hit_hwbug = 1;
5163 
5164 			if (tg3_40bit_overflow_test(tp, mapping, len))
5165 				would_hit_hwbug = 1;
5166 
5167 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5168 				tg3_set_txd(tp, entry, mapping, len,
5169 					    base_flags, (i == last)|(mss << 1));
5170 			else
5171 				tg3_set_txd(tp, entry, mapping, len,
5172 					    base_flags, (i == last));
5173 
5174 			entry = NEXT_TX(entry);
5175 		}
5176 	}
5177 
5178 	if (would_hit_hwbug) {
5179 		u32 last_plus_one = entry;
5180 		u32 start;
5181 
5182 		start = entry - 1 - skb_shinfo(skb)->nr_frags;
5183 		start &= (TG3_TX_RING_SIZE - 1);
5184 
5185 		/* If the workaround fails due to memory/mapping
5186 		 * failure, silently drop this packet.
5187 		 */
5188 		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5189 						&start, base_flags, mss))
5190 			goto out_unlock;
5191 
5192 		entry = start;
5193 	}
5194 
5195 	/* Packets are ready, update Tx producer idx local and on card. */
5196 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5197 
5198 	tp->tx_prod = entry;
5199 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5200 		netif_stop_queue(dev);
5201 		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5202 			netif_wake_queue(tp->dev);
5203 	}
5204 
5205 out_unlock:
5206 	mmiowb();
5207 
5208 	dev->trans_start = jiffies;
5209 
5210 	return NETDEV_TX_OK;
5211 }
5212 
5213 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5214 			       int new_mtu)
5215 {
5216 	dev->mtu = new_mtu;
5217 
5218 	if (new_mtu > ETH_DATA_LEN) {
5219 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5220 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5221 			ethtool_op_set_tso(dev, 0);
5222 		}
5223 		else
5224 			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5225 	} else {
5226 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5227 			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5228 		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5229 	}
5230 }
5231 
5232 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5233 {
5234 	struct tg3 *tp = netdev_priv(dev);
5235 	int err;
5236 
5237 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5238 		return -EINVAL;
5239 
5240 	if (!netif_running(dev)) {
5241 		/* We'll just catch it later when the
5242 		 * device is brought up.
5243 		 */
5244 		tg3_set_mtu(dev, tp, new_mtu);
5245 		return 0;
5246 	}
5247 
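	/* Resizing the rings on a live device requires a full halt
	 * and hardware re-initialization.
	 */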
5248 	tg3_phy_stop(tp);
5249 
5250 	tg3_netif_stop(tp);
5251 
5252 	tg3_full_lock(tp, 1);
5253 
5254 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5255 
5256 	tg3_set_mtu(dev, tp, new_mtu);
5257 
5258 	err = tg3_restart_hw(tp, 0);
5259 
5260 	if (!err)
5261 		tg3_netif_start(tp);
5262 
5263 	tg3_full_unlock(tp);
5264 
5265 	if (!err)
5266 		tg3_phy_start(tp);
5267 
5268 	return err;
5269 }
5270 
5271 /* Free up pending packets in all rx/tx rings.
5272  *
5273  * The chip has been shut down and the driver detached from
5274  * the networking stack, so no interrupts or new tx packets will
5275  * end up in the driver.  tp->{tx,}lock is not held and we are not
5276  * in an interrupt context and thus may sleep.
5277  */
5278 static void tg3_free_rings(struct tg3 *tp)
5279 {
5280 	struct ring_info *rxp;
5281 	int i;
5282 
5283 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5284 		rxp = &tp->rx_std_buffers[i];
5285 
5286 		if (rxp->skb == NULL)
5287 			continue;
5288 		pci_unmap_single(tp->pdev,
5289 				 pci_unmap_addr(rxp, mapping),
5290 				 tp->rx_pkt_buf_sz - tp->rx_offset,
5291 				 PCI_DMA_FROMDEVICE);
5292 		dev_kfree_skb_any(rxp->skb);
5293 		rxp->skb = NULL;
5294 	}
5295 
5296 	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5297 		rxp = &tp->rx_jumbo_buffers[i];
5298 
5299 		if (rxp->skb == NULL)
5300 			continue;
5301 		pci_unmap_single(tp->pdev,
5302 				 pci_unmap_addr(rxp, mapping),
5303 				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5304 				 PCI_DMA_FROMDEVICE);
5305 		dev_kfree_skb_any(rxp->skb);
5306 		rxp->skb = NULL;
5307 	}
5308 
5309 	for (i = 0; i < TG3_TX_RING_SIZE; ) {
5310 		struct tx_ring_info *txp;
5311 		struct sk_buff *skb;
5312 
5313 		txp = &tp->tx_buffers[i];
5314 		skb = txp->skb;
5315 
5316 		if (skb == NULL) {
5317 			i++;
5318 			continue;
5319 		}
5320 
5321 		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5322 
5323 		txp->skb = NULL;
5324 
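		/* The head descriptor plus one per fragment all belong to
		 * this skb, so skip past the whole group.
		 */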
5325 		i += skb_shinfo(skb)->nr_frags + 1;
5326 
5327 		dev_kfree_skb_any(skb);
5328 	}
5329 }
5330 
5331 /* Initialize tx/rx rings for packet processing.
5332  *
5333  * The chip has been shut down and the driver detached from
5334  * the networking stack, so no interrupts or new tx packets will
5335  * end up in the driver.  tp->{tx,}lock are held and thus
5336  * we may not sleep.
5337  */
5338 static int tg3_init_rings(struct tg3 *tp)
5339 {
5340 	u32 i;
5341 
5342 	/* Free up all the SKBs. */
5343 	tg3_free_rings(tp);
5344 
5345 	/* Zero out all descriptors. */
5346 	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5347 	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5348 	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5349 	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5350 
5351 	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5352 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5353 	    (tp->dev->mtu > ETH_DATA_LEN))
5354 		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5355 
5356 	/* Initialize invariants of the rings; we only set this
5357 	 * stuff once.  This works because the card does not
5358 	 * write into the rx buffer posting rings.
5359 	 */
5360 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5361 		struct tg3_rx_buffer_desc *rxd;
5362 
5363 		rxd = &tp->rx_std[i];
5364 		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5365 			<< RXD_LEN_SHIFT;
5366 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5367 		rxd->opaque = (RXD_OPAQUE_RING_STD |
5368 			       (i << RXD_OPAQUE_INDEX_SHIFT));
5369 	}
5370 
5371 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5372 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5373 			struct tg3_rx_buffer_desc *rxd;
5374 
5375 			rxd = &tp->rx_jumbo[i];
5376 			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5377 				<< RXD_LEN_SHIFT;
5378 			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5379 				RXD_FLAG_JUMBO;
5380 			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5381 			       (i << RXD_OPAQUE_INDEX_SHIFT));
5382 		}
5383 	}
5384 
5385 	/* Now allocate fresh SKBs for each rx ring. */
5386 	for (i = 0; i < tp->rx_pending; i++) {
5387 		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5388 			printk(KERN_WARNING PFX
5389 			       "%s: Using a smaller RX standard ring, "
5390 			       "only %d out of %d buffers were allocated "
5391 			       "successfully.\n",
5392 			       tp->dev->name, i, tp->rx_pending);
5393 			if (i == 0)
5394 				return -ENOMEM;
5395 			tp->rx_pending = i;
5396 			break;
5397 		}
5398 	}
5399 
5400 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5401 		for (i = 0; i < tp->rx_jumbo_pending; i++) {
5402 			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5403 					     -1, i) < 0) {
5404 				printk(KERN_WARNING PFX
5405 				       "%s: Using a smaller RX jumbo ring, "
5406 				       "only %d out of %d buffers were "
5407 				       "allocated successfully.\n",
5408 				       tp->dev->name, i, tp->rx_jumbo_pending);
5409 				if (i == 0) {
5410 					tg3_free_rings(tp);
5411 					return -ENOMEM;
5412 				}
5413 				tp->rx_jumbo_pending = i;
5414 				break;
5415 			}
5416 		}
5417 	}
5418 	return 0;
5419 }
5420 
5421 /*
5422  * Must not be invoked with interrupt sources disabled and
5423  * the hardware shut down.
5424  */
5425 static void tg3_free_consistent(struct tg3 *tp)
5426 {
5427 	kfree(tp->rx_std_buffers);
5428 	tp->rx_std_buffers = NULL;
5429 	if (tp->rx_std) {
5430 		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5431 				    tp->rx_std, tp->rx_std_mapping);
5432 		tp->rx_std = NULL;
5433 	}
5434 	if (tp->rx_jumbo) {
5435 		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5436 				    tp->rx_jumbo, tp->rx_jumbo_mapping);
5437 		tp->rx_jumbo = NULL;
5438 	}
5439 	if (tp->rx_rcb) {
5440 		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5441 				    tp->rx_rcb, tp->rx_rcb_mapping);
5442 		tp->rx_rcb = NULL;
5443 	}
5444 	if (tp->tx_ring) {
5445 		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5446 			tp->tx_ring, tp->tx_desc_mapping);
5447 		tp->tx_ring = NULL;
5448 	}
5449 	if (tp->hw_status) {
5450 		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5451 				    tp->hw_status, tp->status_mapping);
5452 		tp->hw_status = NULL;
5453 	}
5454 	if (tp->hw_stats) {
5455 		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5456 				    tp->hw_stats, tp->stats_mapping);
5457 		tp->hw_stats = NULL;
5458 	}
5459 }
5460 
5461 /*
5462  * Must not be invoked with interrupt sources disabled and
5463  * the hardware shut down.  Can sleep.
5464  */
5465 static int tg3_alloc_consistent(struct tg3 *tp)
5466 {
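	/* A single allocation backs all three bookkeeping arrays; the
	 * jumbo and tx pointers below are carved out of the same block.
	 */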
5467 	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5468 				      (TG3_RX_RING_SIZE +
5469 				       TG3_RX_JUMBO_RING_SIZE)) +
5470 				     (sizeof(struct tx_ring_info) *
5471 				      TG3_TX_RING_SIZE),
5472 				     GFP_KERNEL);
5473 	if (!tp->rx_std_buffers)
5474 		return -ENOMEM;
5475 
5476 	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5477 	tp->tx_buffers = (struct tx_ring_info *)
5478 		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5479 
5480 	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5481 					  &tp->rx_std_mapping);
5482 	if (!tp->rx_std)
5483 		goto err_out;
5484 
5485 	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5486 					    &tp->rx_jumbo_mapping);
5487 
5488 	if (!tp->rx_jumbo)
5489 		goto err_out;
5490 
5491 	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5492 					  &tp->rx_rcb_mapping);
5493 	if (!tp->rx_rcb)
5494 		goto err_out;
5495 
5496 	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5497 					   &tp->tx_desc_mapping);
5498 	if (!tp->tx_ring)
5499 		goto err_out;
5500 
5501 	tp->hw_status = pci_alloc_consistent(tp->pdev,
5502 					     TG3_HW_STATUS_SIZE,
5503 					     &tp->status_mapping);
5504 	if (!tp->hw_status)
5505 		goto err_out;
5506 
5507 	tp->hw_stats = pci_alloc_consistent(tp->pdev,
5508 					    sizeof(struct tg3_hw_stats),
5509 					    &tp->stats_mapping);
5510 	if (!tp->hw_stats)
5511 		goto err_out;
5512 
5513 	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5514 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5515 
5516 	return 0;
5517 
5518 err_out:
5519 	tg3_free_consistent(tp);
5520 	return -ENOMEM;
5521 }
5522 
5523 #define MAX_WAIT_CNT 1000
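/* The polls below run at 100us intervals, so MAX_WAIT_CNT bounds each wait at ~100ms. */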
5524 
5525 /* To stop a block, clear the enable bit and poll till it
5526  * clears.  tp->lock is held.
5527  */
5528 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5529 {
5530 	unsigned int i;
5531 	u32 val;
5532 
5533 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5534 		switch (ofs) {
5535 		case RCVLSC_MODE:
5536 		case DMAC_MODE:
5537 		case MBFREE_MODE:
5538 		case BUFMGR_MODE:
5539 		case MEMARB_MODE:
5540 			/* We can't enable/disable these bits of the
5541 			 * 5705/5750, just say success.
5542 			 */
5543 			return 0;
5544 
5545 		default:
5546 			break;
5547 		}
5548 	}
5549 
5550 	val = tr32(ofs);
5551 	val &= ~enable_bit;
5552 	tw32_f(ofs, val);
5553 
5554 	for (i = 0; i < MAX_WAIT_CNT; i++) {
5555 		udelay(100);
5556 		val = tr32(ofs);
5557 		if ((val & enable_bit) == 0)
5558 			break;
5559 	}
5560 
5561 	if (i == MAX_WAIT_CNT && !silent) {
5562 		printk(KERN_ERR PFX "tg3_stop_block timed out, "
5563 		       "ofs=%lx enable_bit=%x\n",
5564 		       ofs, enable_bit);
5565 		return -ENODEV;
5566 	}
5567 
5568 	return 0;
5569 }
5570 
5571 /* tp->lock is held. */
5572 static int tg3_abort_hw(struct tg3 *tp, int silent)
5573 {
5574 	int i, err;
5575 
5576 	tg3_disable_ints(tp);
5577 
5578 	tp->rx_mode &= ~RX_MODE_ENABLE;
5579 	tw32_f(MAC_RX_MODE, tp->rx_mode);
5580 	udelay(10);
5581 
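	/* Stop each rx/tx functional block in turn; failures accumulate
	 * in err rather than aborting the teardown early.
	 */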
5582 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5583 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5584 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5585 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5586 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5587 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5588 
5589 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5590 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5591 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5592 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5593 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5594 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5595 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5596 
5597 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5598 	tw32_f(MAC_MODE, tp->mac_mode);
5599 	udelay(40);
5600 
5601 	tp->tx_mode &= ~TX_MODE_ENABLE;
5602 	tw32_f(MAC_TX_MODE, tp->tx_mode);
5603 
5604 	for (i = 0; i < MAX_WAIT_CNT; i++) {
5605 		udelay(100);
5606 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5607 			break;
5608 	}
5609 	if (i >= MAX_WAIT_CNT) {
5610 		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5611 		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5612 		       tp->dev->name, tr32(MAC_TX_MODE));
5613 		err |= -ENODEV;
5614 	}
5615 
5616 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5617 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5618 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5619 
5620 	tw32(FTQ_RESET, 0xffffffff);
5621 	tw32(FTQ_RESET, 0x00000000);
5622 
5623 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5624 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5625 
5626 	if (tp->hw_status)
5627 		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5628 	if (tp->hw_stats)
5629 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5630 
5631 	return err;
5632 }
5633 
5634 /* tp->lock is held. */
5635 static int tg3_nvram_lock(struct tg3 *tp)
5636 {
5637 	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5638 		int i;
5639 
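		/* The first holder requests software arbitration for the
		 * NVRAM interface and polls up to 8000 * 20us = 160ms for
		 * the grant; nvram_lock_cnt makes the lock recursive.
		 */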
5640 		if (tp->nvram_lock_cnt == 0) {
5641 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5642 			for (i = 0; i < 8000; i++) {
5643 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5644 					break;
5645 				udelay(20);
5646 			}
5647 			if (i == 8000) {
5648 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5649 				return -ENODEV;
5650 			}
5651 		}
5652 		tp->nvram_lock_cnt++;
5653 	}
5654 	return 0;
5655 }
5656 
5657 /* tp->lock is held. */
5658 static void tg3_nvram_unlock(struct tg3 *tp)
5659 {
5660 	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5661 		if (tp->nvram_lock_cnt > 0)
5662 			tp->nvram_lock_cnt--;
5663 		if (tp->nvram_lock_cnt == 0)
5664 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5665 	}
5666 }
5667 
5668 /* tp->lock is held. */
5669 static void tg3_enable_nvram_access(struct tg3 *tp)
5670 {
5671 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5672 	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5673 		u32 nvaccess = tr32(NVRAM_ACCESS);
5674 
5675 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5676 	}
5677 }
5678 
5679 /* tp->lock is held. */
5680 static void tg3_disable_nvram_access(struct tg3 *tp)
5681 {
5682 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5683 	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5684 		u32 nvaccess = tr32(NVRAM_ACCESS);
5685 
5686 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5687 	}
5688 }
5689 
5690 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5691 {
5692 	int i;
5693 	u32 apedata;
5694 
5695 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5696 	if (apedata != APE_SEG_SIG_MAGIC)
5697 		return;
5698 
5699 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5700 	if (!(apedata & APE_FW_STATUS_READY))
5701 		return;
5702 
5703 	/* Wait for up to 1 millisecond for APE to service previous event. */
5704 	for (i = 0; i < 10; i++) {
5705 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5706 			return;
5707 
5708 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5709 
5710 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5711 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5712 					event | APE_EVENT_STATUS_EVENT_PENDING);
5713 
5714 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5715 
5716 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5717 			break;
5718 
5719 		udelay(100);
5720 	}
5721 
5722 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5723 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5724 }
5725 
5726 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5727 {
5728 	u32 event;
5729 	u32 apedata;
5730 
5731 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5732 		return;
5733 
5734 	switch (kind) {
5735 	case RESET_KIND_INIT:
5736 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5737 				APE_HOST_SEG_SIG_MAGIC);
5738 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5739 				APE_HOST_SEG_LEN_MAGIC);
5740 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5741 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5742 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5743 				APE_HOST_DRIVER_ID_MAGIC);
5744 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5745 				APE_HOST_BEHAV_NO_PHYLOCK);
5746 
5747 		event = APE_EVENT_STATUS_STATE_START;
5748 		break;
5749 	case RESET_KIND_SHUTDOWN:
5750 		/* With the interface we are currently using,
5751 		 * APE does not track driver state.  Wiping
5752 		 * out the HOST SEGMENT SIGNATURE forces
5753 		 * the APE to assume OS absent status.
5754 		 */
5755 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5756 
5757 		event = APE_EVENT_STATUS_STATE_UNLOAD;
5758 		break;
5759 	case RESET_KIND_SUSPEND:
5760 		event = APE_EVENT_STATUS_STATE_SUSPEND;
5761 		break;
5762 	default:
5763 		return;
5764 	}
5765 
5766 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5767 
5768 	tg3_ape_send_event(tp, event);
5769 }
5770 
5771 /* tp->lock is held. */
5772 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5773 {
5774 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5775 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5776 
5777 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5778 		switch (kind) {
5779 		case RESET_KIND_INIT:
5780 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781 				      DRV_STATE_START);
5782 			break;
5783 
5784 		case RESET_KIND_SHUTDOWN:
5785 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786 				      DRV_STATE_UNLOAD);
5787 			break;
5788 
5789 		case RESET_KIND_SUSPEND:
5790 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5791 				      DRV_STATE_SUSPEND);
5792 			break;
5793 
5794 		default:
5795 			break;
5796 		}
5797 	}
5798 
5799 	if (kind == RESET_KIND_INIT ||
5800 	    kind == RESET_KIND_SUSPEND)
5801 		tg3_ape_driver_state_change(tp, kind);
5802 }
5803 
5804 /* tp->lock is held. */
5805 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5806 {
5807 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5808 		switch (kind) {
5809 		case RESET_KIND_INIT:
5810 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5811 				      DRV_STATE_START_DONE);
5812 			break;
5813 
5814 		case RESET_KIND_SHUTDOWN:
5815 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5816 				      DRV_STATE_UNLOAD_DONE);
5817 			break;
5818 
5819 		default:
5820 			break;
5821 		}
5822 	}
5823 
5824 	if (kind == RESET_KIND_SHUTDOWN)
5825 		tg3_ape_driver_state_change(tp, kind);
5826 }
5827 
5828 /* tp->lock is held. */
5829 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5830 {
5831 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5832 		switch (kind) {
5833 		case RESET_KIND_INIT:
5834 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5835 				      DRV_STATE_START);
5836 			break;
5837 
5838 		case RESET_KIND_SHUTDOWN:
5839 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5840 				      DRV_STATE_UNLOAD);
5841 			break;
5842 
5843 		case RESET_KIND_SUSPEND:
5844 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5845 				      DRV_STATE_SUSPEND);
5846 			break;
5847 
5848 		default:
5849 			break;
5850 		}
5851 	}
5852 }
5853 
5854 static int tg3_poll_fw(struct tg3 *tp)
5855 {
5856 	int i;
5857 	u32 val;
5858 
5859 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5860 		/* Wait up to 20ms for init done. */
5861 		for (i = 0; i < 200; i++) {
5862 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5863 				return 0;
5864 			udelay(100);
5865 		}
5866 		return -ENODEV;
5867 	}
5868 
5869 	/* Wait for firmware initialization to complete. */
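	/* Bootcode acknowledges by writing back the one's complement of
	 * the magic value posted in tg3_write_sig_pre_reset().
	 */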
5870 	for (i = 0; i < 100000; i++) {
5871 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5872 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5873 			break;
5874 		udelay(10);
5875 	}
5876 
5877 	/* Chip might not be fitted with firmware.  Some Sun onboard
5878 	 * parts are configured like that.  So don't signal the timeout
5879 	 * of the above loop as an error, but do report the lack of
5880 	 * running firmware once.
5881 	 */
5882 	if (i >= 100000 &&
5883 	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5884 		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5885 
5886 		printk(KERN_INFO PFX "%s: No firmware running.\n",
5887 		       tp->dev->name);
5888 	}
5889 
5890 	return 0;
5891 }
5892 
5893 /* Save PCI command register before chip reset */
5894 static void tg3_save_pci_state(struct tg3 *tp)
5895 {
5896 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5897 }
5898 
5899 /* Restore PCI state after chip reset */
5900 static void tg3_restore_pci_state(struct tg3 *tp)
5901 {
5902 	u32 val;
5903 
5904 	/* Re-enable indirect register accesses. */
5905 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5906 			       tp->misc_host_ctrl);
5907 
5908 	/* Set MAX PCI retry to zero. */
5909 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5910 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5911 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5912 		val |= PCISTATE_RETRY_SAME_DMA;
5913 	/* Allow reads and writes to the APE register and memory space. */
5914 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5915 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5916 		       PCISTATE_ALLOW_APE_SHMEM_WR;
5917 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5918 
5919 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5920 
5921 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5922 		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5923 			pcie_set_readrq(tp->pdev, 4096);
5924 		else {
5925 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5926 					      tp->pci_cacheline_sz);
5927 			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5928 					      tp->pci_lat_timer);
5929 		}
5930 	}
5931 
5932 	/* Make sure PCI-X relaxed ordering bit is clear. */
5933 	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
5934 		u16 pcix_cmd;
5935 
5936 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5937 				     &pcix_cmd);
5938 		pcix_cmd &= ~PCI_X_CMD_ERO;
5939 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5940 				      pcix_cmd);
5941 	}
5942 
5943 	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5944 
5945 		/* Chip reset on 5780 will reset MSI enable bit,
5946 		 * so need to restore it.
5947 		 */
5948 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5949 			u16 ctrl;
5950 
5951 			pci_read_config_word(tp->pdev,
5952 					     tp->msi_cap + PCI_MSI_FLAGS,
5953 					     &ctrl);
5954 			pci_write_config_word(tp->pdev,
5955 					      tp->msi_cap + PCI_MSI_FLAGS,
5956 					      ctrl | PCI_MSI_FLAGS_ENABLE);
5957 			val = tr32(MSGINT_MODE);
5958 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5959 		}
5960 	}
5961 }
5962 
5963 static void tg3_stop_fw(struct tg3 *);
5964 
5965 /* tp->lock is held. */
5966 static int tg3_chip_reset(struct tg3 *tp)
5967 {
5968 	u32 val;
5969 	void (*write_op)(struct tg3 *, u32, u32);
5970 	int err;
5971 
5972 	tg3_nvram_lock(tp);
5973 
5974 	tg3_mdio_stop(tp);
5975 
5976 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5977 
5978 	/* No matching tg3_nvram_unlock() after this because
5979 	 * chip reset below will undo the nvram lock.
5980 	 */
5981 	tp->nvram_lock_cnt = 0;
5982 
5983 	/* GRC_MISC_CFG core clock reset will clear the memory
5984 	 * enable bit in PCI register 4 and the MSI enable bit
5985 	 * on some chips, so we save relevant registers here.
5986 	 */
5987 	tg3_save_pci_state(tp);
5988 
5989 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5990 	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
5991 		tw32(GRC_FASTBOOT_PC, 0);
5992 
5993 	/*
5994 	 * We must avoid the readl() that normally takes place.
5995 	 * It locks machines, causes machine checks, and other
5996 	 * fun things.  So, temporarily disable the 5701
5997 	 * hardware workaround, while we do the reset.
5998 	 */
5999 	write_op = tp->write32;
6000 	if (write_op == tg3_write_flush_reg32)
6001 		tp->write32 = tg3_write32;
6002 
6003 	/* Prevent the irq handler from reading or writing PCI registers
6004 	 * during chip reset when the memory enable bit in the PCI command
6005 	 * register may be cleared.  The chip does not generate interrupt
6006 	 * at this time, but the irq handler may still be called due to irq
6007 	 * sharing or irqpoll.
6008 	 */
6009 	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6010 	if (tp->hw_status) {
6011 		tp->hw_status->status = 0;
6012 		tp->hw_status->status_tag = 0;
6013 	}
6014 	tp->last_tag = 0;
6015 	smp_mb();
6016 	synchronize_irq(tp->pdev->irq);
6017 
6018 	/* do the reset */
6019 	val = GRC_MISC_CFG_CORECLK_RESET;
6020 
6021 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6022 		if (tr32(0x7e2c) == 0x60) {
6023 			tw32(0x7e2c, 0x20);
6024 		}
6025 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6026 			tw32(GRC_MISC_CFG, (1 << 29));
6027 			val |= (1 << 29);
6028 		}
6029 	}
6030 
6031 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6032 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6033 		tw32(GRC_VCPU_EXT_CTRL,
6034 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6035 	}
6036 
6037 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6038 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6039 	tw32(GRC_MISC_CFG, val);
6040 
6041 	/* restore 5701 hardware bug workaround write method */
6042 	tp->write32 = write_op;
6043 
6044 	/* Unfortunately, we have to delay before the PCI read back.
6045 	 * Some 575X chips even will not respond to a PCI cfg access
6046 	 * when the reset command is given to the chip.
6047 	 *
6048 	 * How do these hardware designers expect things to work
6049 	 * properly if the PCI write is posted for a long period
6050 	 * of time?  It is always necessary to have some method by
6051 	 * which a register read back can occur to push the write
6052 	 * out which does the reset.
6053 	 *
6054 	 * For most tg3 variants the trick below was working.
6055 	 * Ho hum...
6056 	 */
6057 	udelay(120);
6058 
6059 	/* Flush PCI posted writes.  The normal MMIO registers
6060 	 * are inaccessible at this time so this is the only
6061 	 * way to do this reliably (actually, this is no longer
6062 	 * the case, see above).  I tried to use indirect
6063 	 * register read/write but this upset some 5701 variants.
6064 	 */
6065 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6066 
6067 	udelay(120);
6068 
6069 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6070 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6071 			int i;
6072 			u32 cfg_val;
6073 
6074 			/* Wait for link training to complete.  */
6075 			for (i = 0; i < 5000; i++)
6076 				udelay(100);
6077 
6078 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6079 			pci_write_config_dword(tp->pdev, 0xc4,
6080 					       cfg_val | (1 << 15));
6081 		}
6082 
6083 		/* Set PCIE max payload size to 128 bytes and
6084 		 * clear the "no snoop" and "relaxed ordering" bits.
6085 		 */
6086 		pci_write_config_word(tp->pdev,
6087 				      tp->pcie_cap + PCI_EXP_DEVCTL,
6088 				      0);
6089 
6090 		pcie_set_readrq(tp->pdev, 4096);
6091 
6092 		/* Clear error status */
6093 		pci_write_config_word(tp->pdev,
6094 				      tp->pcie_cap + PCI_EXP_DEVSTA,
6095 				      PCI_EXP_DEVSTA_CED |
6096 				      PCI_EXP_DEVSTA_NFED |
6097 				      PCI_EXP_DEVSTA_FED |
6098 				      PCI_EXP_DEVSTA_URD);
6099 	}
6100 
6101 	tg3_restore_pci_state(tp);
6102 
6103 	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6104 
6105 	val = 0;
6106 	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6107 		val = tr32(MEMARB_MODE);
6108 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6109 
6110 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6111 		tg3_stop_fw(tp);
6112 		tw32(0x5000, 0x400);
6113 	}
6114 
6115 	tw32(GRC_MODE, tp->grc_mode);
6116 
6117 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6118 		val = tr32(0xc4);
6119 
6120 		tw32(0xc4, val | (1 << 15));
6121 	}
6122 
6123 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6124 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6125 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6126 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6127 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6128 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6129 	}
6130 
6131 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6132 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6133 		tw32_f(MAC_MODE, tp->mac_mode);
6134 	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6135 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6136 		tw32_f(MAC_MODE, tp->mac_mode);
6137 	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6138 		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6139 		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6140 			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6141 		tw32_f(MAC_MODE, tp->mac_mode);
6142 	} else
6143 		tw32_f(MAC_MODE, 0);
6144 	udelay(40);
6145 
6146 	tg3_mdio_start(tp);
6147 
6148 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6149 
6150 	err = tg3_poll_fw(tp);
6151 	if (err)
6152 		return err;
6153 
6154 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6155 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6156 		val = tr32(0x7c00);
6157 
6158 		tw32(0x7c00, val | (1 << 25));
6159 	}
6160 
6161 	/* Reprobe ASF enable state.  */
6162 	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6163 	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6164 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6165 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6166 		u32 nic_cfg;
6167 
6168 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6169 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6170 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6171 			tp->last_event_jiffies = jiffies;
6172 			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6173 				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6174 		}
6175 	}
6176 
6177 	return 0;
6178 }
6179 
6180 /* tp->lock is held. */
6181 static void tg3_stop_fw(struct tg3 *tp)
6182 {
6183 	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6184 	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6185 		/* Wait for RX cpu to ACK the previous event. */
6186 		tg3_wait_for_event_ack(tp);
6187 
6188 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6189 
6190 		tg3_generate_fw_event(tp);
6191 
6192 		/* Wait for RX cpu to ACK this event. */
6193 		tg3_wait_for_event_ack(tp);
6194 	}
6195 }
6196 
6197 /* tp->lock is held. */
6198 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6199 {
6200 	int err;
6201 
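	/* Order matters: quiesce the ASF firmware and post the reset
	 * signature before the chip reset, then report completion through
	 * both the legacy and new-handshake mailboxes.
	 */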
6202 	tg3_stop_fw(tp);
6203 
6204 	tg3_write_sig_pre_reset(tp, kind);
6205 
6206 	tg3_abort_hw(tp, silent);
6207 	err = tg3_chip_reset(tp);
6208 
6209 	tg3_write_sig_legacy(tp, kind);
6210 	tg3_write_sig_post_reset(tp, kind);
6211 
6212 	if (err)
6213 		return err;
6214 
6215 	return 0;
6216 }
6217 
6218 #define RX_CPU_SCRATCH_BASE	0x30000
6219 #define RX_CPU_SCRATCH_SIZE	0x04000
6220 #define TX_CPU_SCRATCH_BASE	0x34000
6221 #define TX_CPU_SCRATCH_SIZE	0x04000
6222 
6223 /* tp->lock is held. */
6224 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6225 {
6226 	int i;
6227 
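
	/* 5705 and newer parts have no separate TX CPU to halt. */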
6228 	BUG_ON(offset == TX_CPU_BASE &&
6229 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6230 
6231 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6232 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
6233 
6234 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6235 		return 0;
6236 	}
6237 	if (offset == RX_CPU_BASE) {
6238 		for (i = 0; i < 10000; i++) {
6239 			tw32(offset + CPU_STATE, 0xffffffff);
6240 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6241 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6242 				break;
6243 		}
6244 
6245 		tw32(offset + CPU_STATE, 0xffffffff);
6246 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6247 		udelay(10);
6248 	} else {
6249 		for (i = 0; i < 10000; i++) {
6250 			tw32(offset + CPU_STATE, 0xffffffff);
6251 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6252 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6253 				break;
6254 		}
6255 	}
6256 
6257 	if (i >= 10000) {
6258 		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6259 		       "and %s CPU\n",
6260 		       tp->dev->name,
6261 		       (offset == RX_CPU_BASE ? "RX" : "TX"));
6262 		return -ENODEV;
6263 	}
6264 
6265 	/* Clear firmware's nvram arbitration. */
6266 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
6267 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6268 	return 0;
6269 }
6270 
6271 struct fw_info {
6272 	unsigned int fw_base;
6273 	unsigned int fw_len;
6274 	const __be32 *fw_data;
6275 };
6276 
6277 /* tp->lock is held. */
6278 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6279 				 int cpu_scratch_size, struct fw_info *info)
6280 {
6281 	int err, lock_err, i;
6282 	void (*write_op)(struct tg3 *, u32, u32);
6283 
6284 	if (cpu_base == TX_CPU_BASE &&
6285 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6286 		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6287 		       "TX cpu firmware on %s which is 5705.\n",
6288 		       tp->dev->name);
6289 		return -EINVAL;
6290 	}
6291 
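	/* 5705+ parts take the firmware image through the NIC SRAM memory
	 * window; older chips are written via indirect register accesses.
	 */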
6292 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6293 		write_op = tg3_write_mem;
6294 	else
6295 		write_op = tg3_write_indirect_reg32;
6296 
6297 	/* It is possible that bootcode is still loading at this point.
6298 	 * Get the nvram lock first before halting the cpu.
6299 	 */
6300 	lock_err = tg3_nvram_lock(tp);
6301 	err = tg3_halt_cpu(tp, cpu_base);
6302 	if (!lock_err)
6303 		tg3_nvram_unlock(tp);
6304 	if (err)
6305 		goto out;
6306 
6307 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6308 		write_op(tp, cpu_scratch_base + i, 0);
6309 	tw32(cpu_base + CPU_STATE, 0xffffffff);
6310 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6311 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6312 		write_op(tp, (cpu_scratch_base +
6313 			      (info->fw_base & 0xffff) +
6314 			      (i * sizeof(u32))),
6315 			      be32_to_cpu(info->fw_data[i]));
6316 
6317 	err = 0;
6318 
6319 out:
6320 	return err;
6321 }
6322 
6323 /* tp->lock is held. */
6324 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6325 {
6326 	struct fw_info info;
6327 	const __be32 *fw_data;
6328 	int err, i;
6329 
6330 	fw_data = (void *)tp->fw->data;
6331 
6332 	/* Firmware blob starts with version numbers, followed by
6333 	 * start address and length.  We are setting the complete length:
6334 	 * length = end_address_of_bss - start_address_of_text.
6335 	 * The remainder is the blob to be loaded contiguously
6336 	 * from the start address. */
6337 
6338 	info.fw_base = be32_to_cpu(fw_data[1]);
6339 	info.fw_len = tp->fw->size - 12;
6340 	info.fw_data = &fw_data[3];
6341 
6342 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6343 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6344 				    &info);
6345 	if (err)
6346 		return err;
6347 
6348 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6349 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6350 				    &info);
6351 	if (err)
6352 		return err;
6353 
6354 	/* Now startup only the RX cpu. */
6355 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6356 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6357 
6358 	for (i = 0; i < 5; i++) {
6359 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6360 			break;
6361 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6362 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6363 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6364 		udelay(1000);
6365 	}
6366 	if (i >= 5) {
6367 		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6368 		       "to set RX CPU PC, is %08x should be %08x\n",
6369 		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6370 		       info.fw_base);
6371 		return -ENODEV;
6372 	}
6373 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6374 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6375 
6376 	return 0;
6377 }
6378 
6379 /* 5705 needs a special version of the TSO firmware.  */
6380 
6381 /* tp->lock is held. */
6382 static int tg3_load_tso_firmware(struct tg3 *tp)
6383 {
6384 	struct fw_info info;
6385 	const __be32 *fw_data;
6386 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6387 	int err, i;
6388 
6389 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6390 		return 0;
6391 
6392 	fw_data = (void *)tp->fw->data;
6393 
6394 	/* Firmware blob starts with version numbers, followed by
6395 	 * start address and length.  We are setting the complete length:
6396 	 * length = end_address_of_bss - start_address_of_text.
6397 	 * The remainder is the blob to be loaded contiguously
6398 	 * from the start address. */
6399 
6400 	info.fw_base = be32_to_cpu(fw_data[1]);
6401 	cpu_scratch_size = tp->fw_len;
6402 	info.fw_len = tp->fw->size - 12;
6403 	info.fw_data = &fw_data[3];
6404 
6405 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6406 		cpu_base = RX_CPU_BASE;
6407 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6408 	} else {
6409 		cpu_base = TX_CPU_BASE;
6410 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6411 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6412 	}
6413 
6414 	err = tg3_load_firmware_cpu(tp, cpu_base,
6415 				    cpu_scratch_base, cpu_scratch_size,
6416 				    &info);
6417 	if (err)
6418 		return err;
6419 
6420 	/* Now startup the cpu. */
6421 	tw32(cpu_base + CPU_STATE, 0xffffffff);
6422 	tw32_f(cpu_base + CPU_PC, info.fw_base);
6423 
6424 	for (i = 0; i < 5; i++) {
6425 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
6426 			break;
6427 		tw32(cpu_base + CPU_STATE, 0xffffffff);
6428 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6429 		tw32_f(cpu_base + CPU_PC, info.fw_base);
6430 		udelay(1000);
6431 	}
6432 	if (i >= 5) {
6433 		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6434 		       "to set CPU PC, is %08x should be %08x\n",
6435 		       tp->dev->name, tr32(cpu_base + CPU_PC),
6436 		       info.fw_base);
6437 		return -ENODEV;
6438 	}
6439 	tw32(cpu_base + CPU_STATE, 0xffffffff);
6440 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
6441 	return 0;
6442 }
6443 
6444 
6445 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6446 {
6447 	struct tg3 *tp = netdev_priv(dev);
6448 	struct sockaddr *addr = p;
6449 	int err = 0, skip_mac_1 = 0;
6450 
6451 	if (!is_valid_ether_addr(addr->sa_data))
6452 		return -EINVAL;
6453 
6454 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6455 
6456 	if (!netif_running(dev))
6457 		return 0;
6458 
6459 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6460 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
6461 
6462 		addr0_high = tr32(MAC_ADDR_0_HIGH);
6463 		addr0_low = tr32(MAC_ADDR_0_LOW);
6464 		addr1_high = tr32(MAC_ADDR_1_HIGH);
6465 		addr1_low = tr32(MAC_ADDR_1_LOW);
6466 
6467 		/* Skip MAC addr 1 if ASF is using it. */
6468 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6469 		    !(addr1_high == 0 && addr1_low == 0))
6470 			skip_mac_1 = 1;
6471 	}
6472 	spin_lock_bh(&tp->lock);
6473 	__tg3_set_mac_addr(tp, skip_mac_1);
6474 	spin_unlock_bh(&tp->lock);
6475 
6476 	return err;
6477 }
6478 
6479 /* tp->lock is held. */
6480 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6481 			   dma_addr_t mapping, u32 maxlen_flags,
6482 			   u32 nic_addr)
6483 {
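	/* Each TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit
	 * host DMA address (split high/low), a maxlen/flags word, and the
	 * ring's address in on-chip memory.
	 */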
6484 	tg3_write_mem(tp,
6485 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6486 		      ((u64) mapping >> 32));
6487 	tg3_write_mem(tp,
6488 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6489 		      ((u64) mapping & 0xffffffff));
6490 	tg3_write_mem(tp,
6491 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6492 		       maxlen_flags);
6493 
6494 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6495 		tg3_write_mem(tp,
6496 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6497 			      nic_addr);
6498 }
6499 
6500 static void __tg3_set_rx_mode(struct net_device *);
6501 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6502 {
6503 	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6504 	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6505 	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6506 	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6507 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6508 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6509 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6510 	}
6511 	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6512 	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6513 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6514 		u32 val = ec->stats_block_coalesce_usecs;
6515 
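		/* Without a link there is nothing for the stats block to
		 * report, so park its DMA ticker until carrier returns.
		 */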
6516 		if (!netif_carrier_ok(tp->dev))
6517 			val = 0;
6518 
6519 		tw32(HOSTCC_STAT_COAL_TICKS, val);
6520 	}
6521 }
6522 
6523 /* tp->lock is held. */
6524 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6525 {
6526 	u32 val, rdmac_mode;
6527 	int i, err, limit;
6528 
6529 	tg3_disable_ints(tp);
6530 
6531 	tg3_stop_fw(tp);
6532 
6533 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6534 
6535 	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6536 		tg3_abort_hw(tp, 1);
6537 	}
6538 
6539 	if (reset_phy &&
6540 	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6541 		tg3_phy_reset(tp);
6542 
6543 	err = tg3_chip_reset(tp);
6544 	if (err)
6545 		return err;
6546 
6547 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6548 
6549 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
6550 		val = tr32(TG3_CPMU_CTRL);
6551 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6552 		tw32(TG3_CPMU_CTRL, val);
6553 
6554 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6555 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6556 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
6557 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6558 
6559 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6560 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6561 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
6562 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6563 
6564 		val = tr32(TG3_CPMU_HST_ACC);
6565 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
6566 		val |= CPMU_HST_ACC_MACCLK_6_25;
6567 		tw32(TG3_CPMU_HST_ACC, val);
6568 	}
6569 
6570 	/* This works around an issue with Athlon chipsets on
6571 	 * B3 tigon3 silicon.  This bit has no effect on any
6572 	 * other revision.  But do not set this on PCI Express
6573 	 * chips and don't even touch the clocks if the CPMU is present.
6574 	 */
6575 	if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6576 		if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6577 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6578 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6579 	}
6580 
6581 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6582 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6583 		val = tr32(TG3PCI_PCISTATE);
6584 		val |= PCISTATE_RETRY_SAME_DMA;
6585 		tw32(TG3PCI_PCISTATE, val);
6586 	}
6587 
6588 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6589 		/* Allow reads and writes to the
6590 		 * APE register and memory space.
6591 		 */
6592 		val = tr32(TG3PCI_PCISTATE);
6593 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6594 		       PCISTATE_ALLOW_APE_SHMEM_WR;
6595 		tw32(TG3PCI_PCISTATE, val);
6596 	}
6597 
6598 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6599 		/* Enable some hw fixes.  */
6600 		val = tr32(TG3PCI_MSI_DATA);
6601 		val |= (1 << 26) | (1 << 28) | (1 << 29);
6602 		tw32(TG3PCI_MSI_DATA, val);
6603 	}
6604 
6605 	/* Descriptor ring init may make accesses to the
6606 	 * NIC SRAM area to setup the TX descriptors, so we
6607 	 * can only do this after the hardware has been
6608 	 * successfully reset.
6609 	 */
6610 	err = tg3_init_rings(tp);
6611 	if (err)
6612 		return err;
6613 
6614 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6615 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6616 		/* This value is determined during the probe time DMA
6617 		 * engine test, tg3_test_dma.
6618 		 */
6619 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6620 	}
6621 
6622 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6623 			  GRC_MODE_4X_NIC_SEND_RINGS |
6624 			  GRC_MODE_NO_TX_PHDR_CSUM |
6625 			  GRC_MODE_NO_RX_PHDR_CSUM);
6626 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6627 
6628 	/* Pseudo-header checksum is done by hardware logic and not
6629 	 * the offload processors, so make the chip do the pseudo-
6630 	 * header checksums on receive.  For transmit it is more
6631 	 * convenient to do the pseudo-header checksum in software
6632 	 * as Linux does that on transmit for us in all cases.
6633 	 */
6634 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6635 
6636 	tw32(GRC_MODE,
6637 	     tp->grc_mode |
6638 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6639 
6640 	/* Set up the timer prescaler register.  The clock is always 66MHz. */
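	/* A value of 65 presumably divides the 66MHz clock by 66, giving
	 * the 1MHz (1us) tick the coalescing parameters are specified in.
	 */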
6641 	val = tr32(GRC_MISC_CFG);
6642 	val &= ~0xff;
6643 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6644 	tw32(GRC_MISC_CFG, val);
6645 
6646 	/* Initialize MBUF/DESC pool. */
6647 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6648 		/* Do nothing.  */
6649 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6650 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6651 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6652 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6653 		else
6654 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6655 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6656 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6657 	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6659 		int fw_len;
6660 
6661 		fw_len = tp->fw_len;
6662 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6663 		tw32(BUFMGR_MB_POOL_ADDR,
6664 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6665 		tw32(BUFMGR_MB_POOL_SIZE,
6666 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6667 	}
6668 
6669 	if (tp->dev->mtu <= ETH_DATA_LEN) {
6670 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
6671 		     tp->bufmgr_config.mbuf_read_dma_low_water);
6672 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
6673 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
6674 		tw32(BUFMGR_MB_HIGH_WATER,
6675 		     tp->bufmgr_config.mbuf_high_water);
6676 	} else {
6677 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
6678 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6679 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
6680 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6681 		tw32(BUFMGR_MB_HIGH_WATER,
6682 		     tp->bufmgr_config.mbuf_high_water_jumbo);
6683 	}
6684 	tw32(BUFMGR_DMA_LOW_WATER,
6685 	     tp->bufmgr_config.dma_low_water);
6686 	tw32(BUFMGR_DMA_HIGH_WATER,
6687 	     tp->bufmgr_config.dma_high_water);
6688 
6689 	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6690 	for (i = 0; i < 2000; i++) {
6691 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6692 			break;
6693 		udelay(10);
6694 	}
6695 	if (i >= 2000) {
6696 		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6697 		       tp->dev->name);
6698 		return -ENODEV;
6699 	}
6700 
6701 	/* Setup replenish threshold. */
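	/* The threshold is 1/8 of the pending buffer count, clamped
	 * between 1 and rx_std_max_post (and further limited on 5906).
	 */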
6702 	val = tp->rx_pending / 8;
6703 	if (val == 0)
6704 		val = 1;
6705 	else if (val > tp->rx_std_max_post)
6706 		val = tp->rx_std_max_post;
6707 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6708 		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6709 			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6710 
6711 		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6712 			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6713 	}
6714 
6715 	tw32(RCVBDI_STD_THRESH, val);
6716 
6717 	/* Initialize TG3_BDINFO's at:
6718 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
6719 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
6720 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
6721 	 *
6722 	 * like so:
6723 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
6724 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
6725 	 *                              ring attribute flags
6726 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
6727 	 *
6728 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6729 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6730 	 *
6731 	 * The size of each ring is fixed in the firmware, but the location is
6732 	 * configurable.
6733 	 */
6734 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6735 	     ((u64) tp->rx_std_mapping >> 32));
6736 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6737 	     ((u64) tp->rx_std_mapping & 0xffffffff));
6738 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6739 	     NIC_SRAM_RX_BUFFER_DESC);
6740 
6741 	/* Don't even try to program the JUMBO/MINI buffer descriptor
6742 	 * configs on 5705.
6743 	 */
6744 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6745 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6746 		     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6747 	} else {
6748 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6749 		     RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6750 
6751 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6752 		     BDINFO_FLAGS_DISABLED);
6753 
6754 		/* Setup replenish threshold. */
6755 		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6756 
6757 		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6758 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6759 			     ((u64) tp->rx_jumbo_mapping >> 32));
6760 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6761 			     ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6762 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6763 			     RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6764 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6765 			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6766 		} else {
6767 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6768 			     BDINFO_FLAGS_DISABLED);
6769 		}
6770 
6771 	}
6772 
6773 	/* There is only one send ring on 5705/5750, no need to explicitly
6774 	 * disable the others.
6775 	 */
6776 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6777 		/* Clear out send RCB ring in SRAM. */
6778 		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6779 			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6780 				      BDINFO_FLAGS_DISABLED);
6781 	}
6782 
6783 	tp->tx_prod = 0;
6784 	tp->tx_cons = 0;
6785 	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6786 	tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6787 
6788 	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6789 		       tp->tx_desc_mapping,
6790 		       (TG3_TX_RING_SIZE <<
6791 			BDINFO_FLAGS_MAXLEN_SHIFT),
6792 		       NIC_SRAM_TX_BUFFER_DESC);
6793 
6794 	/* There is only one receive return ring on 5705/5750, no need
6795 	 * to explicitly disable the others.
6796 	 */
6797 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6798 		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6799 		     i += TG3_BDINFO_SIZE) {
6800 			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6801 				      BDINFO_FLAGS_DISABLED);
6802 		}
6803 	}
6804 
6805 	tp->rx_rcb_ptr = 0;
6806 	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6807 
6808 	tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6809 		       tp->rx_rcb_mapping,
6810 		       (TG3_RX_RCB_RING_SIZE(tp) <<
6811 			BDINFO_FLAGS_MAXLEN_SHIFT),
6812 		       0);
6813 
6814 	tp->rx_std_ptr = tp->rx_pending;
6815 	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6816 		     tp->rx_std_ptr);
6817 
6818 	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6819 						tp->rx_jumbo_pending : 0;
6820 	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6821 		     tp->rx_jumbo_ptr);
6822 
6823 	/* Initialize MAC address and backoff seed. */
6824 	__tg3_set_mac_addr(tp, 0);
6825 
6826 	/* MTU + ethernet header + FCS + optional VLAN tag */
6827 	tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6828 
6829 	/* The slot time is changed by tg3_setup_phy if we
6830 	 * run at gigabit with half duplex.
6831 	 */
6832 	tw32(MAC_TX_LENGTHS,
6833 	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6834 	     (6 << TX_LENGTHS_IPG_SHIFT) |
6835 	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6836 
6837 	/* Receive rules. */
6838 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6839 	tw32(RCVLPC_CONFIG, 0x0181);
6840 
6841 	/* Calculate RDMAC_MODE setting early, we need it to determine
6842 	 * the RCVLPC_STATE_ENABLE mask.
6843 	 */
6844 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6845 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6846 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6847 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6848 		      RDMAC_MODE_LNGREAD_ENAB);
6849 
6850 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
6851 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
6852 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
6853 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6854 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6855 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6856 
6857 	/* If statement applies to 5705 and 5750 PCI devices only */
6858 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6859 	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6860 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6861 		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6862 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6863 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6864 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6865 			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6866 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6867 		}
6868 	}
6869 
6870 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6871 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6872 
6873 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6874 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
6875 
6876 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
6877 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
6878 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
6879 
6880 	/* Receive/send statistics. */
6881 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6882 		val = tr32(RCVLPC_STATS_ENABLE);
6883 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
6884 		tw32(RCVLPC_STATS_ENABLE, val);
6885 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6886 		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6887 		val = tr32(RCVLPC_STATS_ENABLE);
6888 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6889 		tw32(RCVLPC_STATS_ENABLE, val);
6890 	} else {
6891 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6892 	}
6893 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6894 	tw32(SNDDATAI_STATSENAB, 0xffffff);
6895 	tw32(SNDDATAI_STATSCTRL,
6896 	     (SNDDATAI_SCTRL_ENABLE |
6897 	      SNDDATAI_SCTRL_FASTUPD));
6898 
6899 	/* Setup host coalescing engine. */
6900 	tw32(HOSTCC_MODE, 0);
6901 	for (i = 0; i < 2000; i++) {
6902 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6903 			break;
6904 		udelay(10);
6905 	}
6906 
6907 	__tg3_set_coalesce(tp, &tp->coal);
6908 
6909 	/* set status block DMA address */
6910 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6911 	     ((u64) tp->status_mapping >> 32));
6912 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6913 	     ((u64) tp->status_mapping & 0xffffffff));
6914 
6915 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6916 		/* Status/statistics block address.  See tg3_timer,
6917 		 * the tg3_periodic_fetch_stats call there, and
6918 		 * tg3_get_stats to see how this works for 5705/5750 chips.
6919 		 */
6920 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6921 		     ((u64) tp->stats_mapping >> 32));
6922 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6923 		     ((u64) tp->stats_mapping & 0xffffffff));
6924 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6925 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6926 	}
6927 
6928 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6929 
6930 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6931 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6932 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6933 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6934 
6935 	/* Clear statistics/status block in chip, and status block in ram. */
6936 	for (i = NIC_SRAM_STATS_BLK;
6937 	     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6938 	     i += sizeof(u32)) {
6939 		tg3_write_mem(tp, i, 0);
6940 		udelay(40);
6941 	}
6942 	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6943 
6944 	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6945 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6946 		/* reset to prevent losing 1st rx packet intermittently */
6947 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6948 		udelay(10);
6949 	}
6950 
6951 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6952 		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
6953 	else
6954 		tp->mac_mode = 0;
6955 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6956 		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6957 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6958 	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6959 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6960 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6961 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6962 	udelay(40);
6963 
6964 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6965 	 * If TG3_FLG2_IS_NIC is zero, we should read the
6966 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
6967 	 * whether used as inputs or outputs, are set by boot code after
6968 	 * reset.
6969 	 */
6970 	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6971 		u32 gpio_mask;
6972 
6973 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6974 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6975 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6976 
6977 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6978 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6979 				     GRC_LCLCTRL_GPIO_OUTPUT3;
6980 
6981 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6982 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6983 
6984 		tp->grc_local_ctrl &= ~gpio_mask;
6985 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6986 
6987 		/* GPIO1 must be driven high for eeprom write protect */
6988 		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6989 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6990 					       GRC_LCLCTRL_GPIO_OUTPUT1);
6991 	}
6992 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6993 	udelay(100);
6994 
6995 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6996 	tp->last_tag = 0;
6997 
6998 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6999 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7000 		udelay(40);
7001 	}
7002 
7003 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7004 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7005 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7006 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7007 	       WDMAC_MODE_LNGREAD_ENAB);
7008 
7009 	/* If statement applies to 5705 and 5750 PCI devices only */
7010 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7011 	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7012 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7013 		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7014 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7015 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7016 			/* nothing */
7017 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7018 			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7019 			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7020 			val |= WDMAC_MODE_RX_ACCEL;
7021 		}
7022 	}
7023 
7024 	/* Enable host coalescing bug fix */
7025 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7026 		val |= WDMAC_MODE_STATUS_TAG_FIX;
7027 
7028 	tw32_f(WDMAC_MODE, val);
7029 	udelay(40);
7030 
7031 	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7032 		u16 pcix_cmd;
7033 
7034 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7035 				     &pcix_cmd);
7036 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7037 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7038 			pcix_cmd |= PCI_X_CMD_READ_2K;
7039 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7040 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7041 			pcix_cmd |= PCI_X_CMD_READ_2K;
7042 		}
7043 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7044 				      pcix_cmd);
7045 	}
7046 
7047 	tw32_f(RDMAC_MODE, rdmac_mode);
7048 	udelay(40);
7049 
7050 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7051 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7052 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7053 
7054 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7055 		tw32(SNDDATAC_MODE,
7056 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7057 	else
7058 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7059 
7060 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7061 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7062 	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7063 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7064 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7065 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7066 	tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7067 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7068 
7069 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7070 		err = tg3_load_5701_a0_firmware_fix(tp);
7071 		if (err)
7072 			return err;
7073 	}
7074 
7075 	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7076 		err = tg3_load_tso_firmware(tp);
7077 		if (err)
7078 			return err;
7079 	}
7080 
7081 	tp->tx_mode = TX_MODE_ENABLE;
7082 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7083 	udelay(100);
7084 
7085 	tp->rx_mode = RX_MODE_ENABLE;
7086 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7087 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7088 
7089 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7090 	udelay(10);
7091 
7092 	tw32(MAC_LED_CTRL, tp->led_ctrl);
7093 
7094 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7095 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7096 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7097 		udelay(10);
7098 	}
7099 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7100 	udelay(10);
7101 
7102 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7103 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7104 			!(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7105 			/* Set drive transmission level to 1.2V, but only
7106 			 * if the signal pre-emphasis bit is not set.  */
7107 			val = tr32(MAC_SERDES_CFG);
7108 			val &= 0xfffff000;
7109 			val |= 0x880;
7110 			tw32(MAC_SERDES_CFG, val);
7111 		}
7112 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7113 			tw32(MAC_SERDES_CFG, 0x616000);
7114 	}
7115 
7116 	/* Prevent chip from dropping frames when flow control
7117 	 * is enabled.
7118 	 */
7119 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7120 
7121 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7122 	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7123 		/* Use hardware link auto-negotiation */
7124 		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7125 	}
7126 
7127 	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7128 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7129 		u32 tmp;
7130 
7131 		tmp = tr32(SERDES_RX_CTRL);
7132 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7133 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7134 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7135 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7136 	}
7137 
7138 	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7139 		if (tp->link_config.phy_is_low_power) {
7140 			tp->link_config.phy_is_low_power = 0;
7141 			tp->link_config.speed = tp->link_config.orig_speed;
7142 			tp->link_config.duplex = tp->link_config.orig_duplex;
7143 			tp->link_config.autoneg = tp->link_config.orig_autoneg;
7144 		}
7145 
7146 		err = tg3_setup_phy(tp, 0);
7147 		if (err)
7148 			return err;
7149 
7150 		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7151 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7152 			u32 tmp;
7153 
7154 			/* Clear CRC stats. */
7155 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7156 				tg3_writephy(tp, MII_TG3_TEST1,
7157 					     tmp | MII_TG3_TEST1_CRC_EN);
7158 				tg3_readphy(tp, 0x14, &tmp);
7159 			}
7160 		}
7161 	}
7162 
7163 	__tg3_set_rx_mode(tp->dev);
7164 
7165 	/* Initialize receive rules. */
7166 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7167 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7168 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7169 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7170 
7171 	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7172 	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7173 		limit = 8;
7174 	else
7175 		limit = 16;
7176 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7177 		limit -= 4;
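	/* Clear the unused receive rule slots.  Pre-5705 chips implement
	 * 16 rules, 5705+ chips only 8, and when ASF is active the top
	 * four slots are skipped, presumably reserved for the firmware.
	 * The cases below fall through deliberately so that every slot
	 * from limit - 1 down to 4 is cleared; rules 2 and 3 are
	 * intentionally left alone (note the commented-out writes) and
	 * rules 0 and 1 were programmed above.
	 */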
7178 	switch (limit) {
7179 	case 16:
7180 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7181 	case 15:
7182 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7183 	case 14:
7184 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7185 	case 13:
7186 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7187 	case 12:
7188 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7189 	case 11:
7190 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7191 	case 10:
7192 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7193 	case 9:
7194 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7195 	case 8:
7196 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7197 	case 7:
7198 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7199 	case 6:
7200 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7201 	case 5:
7202 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7203 	case 4:
7204 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7205 	case 3:
7206 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7207 	case 2:
7208 	case 1:
7209 
7210 	default:
7211 		break;
7212 	}
7213 
7214 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7215 		/* Write our heartbeat update interval to APE. */
7216 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7217 				APE_HOST_HEARTBEAT_INT_DISABLE);
7218 
7219 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7220 
7221 	return 0;
7222 }
7223 
7224 /* Called at device open time to get the chip ready for
7225  * packet processing.  Invoked with tp->lock held.
7226  */
7227 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7228 {
7229 	tg3_switch_clocks(tp);
7230 
7231 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7232 
7233 	return tg3_reset_hw(tp, reset_phy);
7234 }
7235 
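/* Accumulate a 32-bit hardware counter into a 64-bit software total.
 * The MAC counters are only 32 bits wide and wrap silently; after the
 * addition, (PSTAT)->low < __val can only hold if the add wrapped, so
 * carry into ->high.  E.g. low = 0xfffffff0 plus __val = 0x20 leaves
 * low = 0x10, which is < 0x20, so high is incremented.
 */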
7236 #define TG3_STAT_ADD32(PSTAT, REG) \
7237 do {	u32 __val = tr32(REG); \
7238 	(PSTAT)->low += __val; \
7239 	if ((PSTAT)->low < __val) \
7240 		(PSTAT)->high += 1; \
7241 } while (0)
7242 
7243 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7244 {
7245 	struct tg3_hw_stats *sp = tp->hw_stats;
7246 
7247 	if (!netif_carrier_ok(tp->dev))
7248 		return;
7249 
7250 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7251 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7252 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7253 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7254 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7255 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7256 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7257 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7258 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7259 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7260 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7261 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7262 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7263 
7264 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7265 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7266 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7267 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7268 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7269 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7270 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7271 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7272 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7273 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7274 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7275 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7276 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7277 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7278 
7279 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7280 	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7281 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7282 }
7283 
7284 static void tg3_timer(unsigned long __opaque)
7285 {
7286 	struct tg3 *tp = (struct tg3 *) __opaque;
7287 
7288 	if (tp->irq_sync)
7289 		goto restart_timer;
7290 
7291 	spin_lock(&tp->lock);
7292 
7293 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7294 		/* All of this garbage is because, when using non-tagged
7295 		 * IRQ status, the mailbox/status_block protocol the chip
7296 		 * uses with the CPU is race prone.
7297 		 */
7298 		if (tp->hw_status->status & SD_STATUS_UPDATED) {
7299 			tw32(GRC_LOCAL_CTRL,
7300 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7301 		} else {
7302 			tw32(HOSTCC_MODE, tp->coalesce_mode |
7303 			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7304 		}
7305 
7306 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7307 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7308 			spin_unlock(&tp->lock);
7309 			schedule_work(&tp->reset_task);
7310 			return;
7311 		}
7312 	}
7313 
7314 	/* This part only runs once per second. */
7315 	if (!--tp->timer_counter) {
7316 		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7317 			tg3_periodic_fetch_stats(tp);
7318 
7319 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7320 			u32 mac_stat;
7321 			int phy_event;
7322 
7323 			mac_stat = tr32(MAC_STATUS);
7324 
7325 			phy_event = 0;
7326 			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7327 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7328 					phy_event = 1;
7329 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7330 				phy_event = 1;
7331 
7332 			if (phy_event)
7333 				tg3_setup_phy(tp, 0);
7334 		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7335 			u32 mac_stat = tr32(MAC_STATUS);
7336 			int need_setup = 0;
7337 
7338 			if (netif_carrier_ok(tp->dev) &&
7339 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7340 				need_setup = 1;
7341 			}
7342 			if (!netif_carrier_ok(tp->dev) &&
7343 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
7344 					 MAC_STATUS_SIGNAL_DET))) {
7345 				need_setup = 1;
7346 			}
7347 			if (need_setup) {
7348 				if (!tp->serdes_counter) {
7349 					tw32_f(MAC_MODE,
7350 					     (tp->mac_mode &
7351 					      ~MAC_MODE_PORT_MODE_MASK));
7352 					udelay(40);
7353 					tw32_f(MAC_MODE, tp->mac_mode);
7354 					udelay(40);
7355 				}
7356 				tg3_setup_phy(tp, 0);
7357 			}
7358 		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7359 			tg3_serdes_parallel_detect(tp);
7360 
7361 		tp->timer_counter = tp->timer_multiplier;
7362 	}
7363 
7364 	/* Heartbeat is only sent once every 2 seconds.
7365 	 *
7366 	 * The heartbeat is to tell the ASF firmware that the host
7367 	 * driver is still alive.  In the event that the OS crashes,
7368 	 * ASF needs to reset the hardware to free up the FIFO space
7369 	 * that may be filled with rx packets destined for the host.
7370 	 * If the FIFO is full, ASF will no longer function properly.
7371 	 *
7372 	 * Unintended resets have been reported on real time kernels
7373 	 * where the timer doesn't run on time.  Netpoll will also have
7374 	 * the same problem.
7375 	 *
7376 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7377 	 * to check the ring condition when the heartbeat is expiring
7378 	 * before doing the reset.  This will prevent most unintended
7379 	 * resets.
7380 	 */
7381 	if (!--tp->asf_counter) {
7382 		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7383 		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7384 			tg3_wait_for_event_ack(tp);
7385 
7386 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7387 				      FWCMD_NICDRV_ALIVE3);
7388 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7389 			/* 5 seconds timeout */
7390 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7391 
7392 			tg3_generate_fw_event(tp);
7393 		}
7394 		tp->asf_counter = tp->asf_multiplier;
7395 	}
7396 
7397 	spin_unlock(&tp->lock);
7398 
7399 restart_timer:
7400 	tp->timer.expires = jiffies + tp->timer_offset;
7401 	add_timer(&tp->timer);
7402 }
7403 
7404 static int tg3_request_irq(struct tg3 *tp)
7405 {
7406 	irq_handler_t fn;
7407 	unsigned long flags;
7408 	struct net_device *dev = tp->dev;
7409 
7410 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7411 		fn = tg3_msi;
7412 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7413 			fn = tg3_msi_1shot;
7414 		flags = IRQF_SAMPLE_RANDOM;
7415 	} else {
7416 		fn = tg3_interrupt;
7417 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7418 			fn = tg3_interrupt_tagged;
7419 		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7420 	}
7421 	return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7422 }
7423 
7424 static int tg3_test_interrupt(struct tg3 *tp)
7425 {
7426 	struct net_device *dev = tp->dev;
7427 	int err, i, intr_ok = 0;
7428 
7429 	if (!netif_running(dev))
7430 		return -ENODEV;
7431 
7432 	tg3_disable_ints(tp);
7433 
7434 	free_irq(tp->pdev->irq, dev);
7435 
7436 	err = request_irq(tp->pdev->irq, tg3_test_isr,
7437 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7438 	if (err)
7439 		return err;
7440 
7441 	tp->hw_status->status &= ~SD_STATUS_UPDATED;
7442 	tg3_enable_ints(tp);
7443 
7444 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7445 	       HOSTCC_MODE_NOW);
7446 
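	/* Poll for up to 50 ms (5 x 10 ms).  Either a non-zero interrupt
	 * mailbox or the masked-PCI-interrupt bit in MISC_HOST_CTRL is
	 * taken as proof that the test ISR fired.
	 */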
7447 	for (i = 0; i < 5; i++) {
7448 		u32 int_mbox, misc_host_ctrl;
7449 
7450 		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7451 					TG3_64BIT_REG_LOW);
7452 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7453 
7454 		if ((int_mbox != 0) ||
7455 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7456 			intr_ok = 1;
7457 			break;
7458 		}
7459 
7460 		msleep(10);
7461 	}
7462 
7463 	tg3_disable_ints(tp);
7464 
7465 	free_irq(tp->pdev->irq, dev);
7466 
7467 	err = tg3_request_irq(tp);
7468 
7469 	if (err)
7470 		return err;
7471 
7472 	if (intr_ok)
7473 		return 0;
7474 
7475 	return -EIO;
7476 }
7477 
7478 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7479  * INTx mode is successfully restored.
7480  */
7481 static int tg3_test_msi(struct tg3 *tp)
7482 {
7483 	struct net_device *dev = tp->dev;
7484 	int err;
7485 	u16 pci_cmd;
7486 
7487 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7488 		return 0;
7489 
7490 	/* Turn off SERR reporting in case MSI terminates with Master
7491 	 * Abort.
7492 	 */
7493 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7494 	pci_write_config_word(tp->pdev, PCI_COMMAND,
7495 			      pci_cmd & ~PCI_COMMAND_SERR);
7496 
7497 	err = tg3_test_interrupt(tp);
7498 
7499 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7500 
7501 	if (!err)
7502 		return 0;
7503 
7504 	/* other failures */
7505 	if (err != -EIO)
7506 		return err;
7507 
7508 	/* MSI test failed, go back to INTx mode */
7509 	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7510 	       "switching to INTx mode. Please report this failure to "
7511 	       "the PCI maintainer and include system chipset information.\n",
7512 		       tp->dev->name);
7513 
7514 	free_irq(tp->pdev->irq, dev);
7515 	pci_disable_msi(tp->pdev);
7516 
7517 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7518 
7519 	err = tg3_request_irq(tp);
7520 	if (err)
7521 		return err;
7522 
7523 	/* Need to reset the chip because the MSI cycle may have terminated
7524 	 * with Master Abort.
7525 	 */
7526 	tg3_full_lock(tp, 1);
7527 
7528 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7529 	err = tg3_init_hw(tp, 1);
7530 
7531 	tg3_full_unlock(tp);
7532 
7533 	if (err)
7534 		free_irq(tp->pdev->irq, dev);
7535 
7536 	return err;
7537 }
7538 
7539 static int tg3_request_firmware(struct tg3 *tp)
7540 {
7541 	const __be32 *fw_data;
7542 
7543 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7544 		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7545 		       tp->dev->name, tp->fw_needed);
7546 		return -ENOENT;
7547 	}
7548 
7549 	fw_data = (void *)tp->fw->data;
7550 
7551 	/* Firmware blob starts with version numbers, followed by
7552 	 * start address and _full_ length including BSS sections
7553 	 * (which must be longer than the actual data, of course).
7554 	 */
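	/* A sketch of the header this layout implies (big-endian words;
	 * illustrative only, the driver does not define such a struct):
	 *
	 *	struct tg3_fw_hdr {
	 *		__be32 version;		// fw_data[0]
	 *		__be32 base_addr;	// fw_data[1], load address
	 *		__be32 len;		// fw_data[2], text + bss
	 *	};				// image payload follows
	 */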
7555 
7556 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
7557 	if (tp->fw_len < (tp->fw->size - 12)) {
7558 		printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7559 		       tp->dev->name, tp->fw_len, tp->fw_needed);
7560 		release_firmware(tp->fw);
7561 		tp->fw = NULL;
7562 		return -EINVAL;
7563 	}
7564 
7565 	/* We no longer need firmware; we have it. */
7566 	tp->fw_needed = NULL;
7567 	return 0;
7568 }
7569 
7570 static int tg3_open(struct net_device *dev)
7571 {
7572 	struct tg3 *tp = netdev_priv(dev);
7573 	int err;
7574 
7575 	if (tp->fw_needed) {
7576 		err = tg3_request_firmware(tp);
7577 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7578 			if (err)
7579 				return err;
7580 		} else if (err) {
7581 			printk(KERN_WARNING "%s: TSO capability disabled.\n",
7582 			       tp->dev->name);
7583 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7584 		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7585 			printk(KERN_NOTICE "%s: TSO capability restored.\n",
7586 			       tp->dev->name);
7587 			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7588 		}
7589 	}
7590 
7591 	netif_carrier_off(tp->dev);
7592 
7593 	err = tg3_set_power_state(tp, PCI_D0);
7594 	if (err)
7595 		return err;
7596 
7597 	tg3_full_lock(tp, 0);
7598 
7599 	tg3_disable_ints(tp);
7600 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7601 
7602 	tg3_full_unlock(tp);
7603 
7604 	/* The placement of this call is tied
7605 	 * to the setup and use of Host TX descriptors.
7606 	 */
7607 	err = tg3_alloc_consistent(tp);
7608 	if (err)
7609 		return err;
7610 
7611 	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7612 		/* All MSI supporting chips should support tagged
7613 		 * status.  Assert that this is the case.
7614 		 */
7615 		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7616 			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7617 			       "Not using MSI.\n", tp->dev->name);
7618 		} else if (pci_enable_msi(tp->pdev) == 0) {
7619 			u32 msi_mode;
7620 
7621 			msi_mode = tr32(MSGINT_MODE);
7622 			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7623 			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7624 		}
7625 	}
7626 	err = tg3_request_irq(tp);
7627 
7628 	if (err) {
7629 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7630 			pci_disable_msi(tp->pdev);
7631 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7632 		}
7633 		tg3_free_consistent(tp);
7634 		return err;
7635 	}
7636 
7637 	napi_enable(&tp->napi);
7638 
7639 	tg3_full_lock(tp, 0);
7640 
7641 	err = tg3_init_hw(tp, 1);
7642 	if (err) {
7643 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7644 		tg3_free_rings(tp);
7645 	} else {
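		/* Tagged-status chips only need a 1 Hz housekeeping tick;
		 * the rest poll at 10 Hz to paper over the racy non-tagged
		 * IRQ protocol (see tg3_timer).  The multipliers rescale
		 * the once-a-second and once-every-two-seconds work.
		 */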
7646 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7647 			tp->timer_offset = HZ;
7648 		else
7649 			tp->timer_offset = HZ / 10;
7650 
7651 		BUG_ON(tp->timer_offset > HZ);
7652 		tp->timer_counter = tp->timer_multiplier =
7653 			(HZ / tp->timer_offset);
7654 		tp->asf_counter = tp->asf_multiplier =
7655 			((HZ / tp->timer_offset) * 2);
7656 
7657 		init_timer(&tp->timer);
7658 		tp->timer.expires = jiffies + tp->timer_offset;
7659 		tp->timer.data = (unsigned long) tp;
7660 		tp->timer.function = tg3_timer;
7661 	}
7662 
7663 	tg3_full_unlock(tp);
7664 
7665 	if (err) {
7666 		napi_disable(&tp->napi);
7667 		free_irq(tp->pdev->irq, dev);
7668 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7669 			pci_disable_msi(tp->pdev);
7670 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7671 		}
7672 		tg3_free_consistent(tp);
7673 		return err;
7674 	}
7675 
7676 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7677 		err = tg3_test_msi(tp);
7678 
7679 		if (err) {
7680 			tg3_full_lock(tp, 0);
7681 
7682 			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7683 				pci_disable_msi(tp->pdev);
7684 				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7685 			}
7686 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7687 			tg3_free_rings(tp);
7688 			tg3_free_consistent(tp);
7689 
7690 			tg3_full_unlock(tp);
7691 
7692 			napi_disable(&tp->napi);
7693 
7694 			return err;
7695 		}
7696 
7697 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7698 			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7699 				u32 val = tr32(PCIE_TRANSACTION_CFG);
7700 
7701 				tw32(PCIE_TRANSACTION_CFG,
7702 				     val | PCIE_TRANS_CFG_1SHOT_MSI);
7703 			}
7704 		}
7705 	}
7706 
7707 	tg3_phy_start(tp);
7708 
7709 	tg3_full_lock(tp, 0);
7710 
7711 	add_timer(&tp->timer);
7712 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7713 	tg3_enable_ints(tp);
7714 
7715 	tg3_full_unlock(tp);
7716 
7717 	netif_start_queue(dev);
7718 
7719 	return 0;
7720 }
7721 
7722 #if 0
7723 /*static*/ void tg3_dump_state(struct tg3 *tp)
7724 {
7725 	u32 val32, val32_2, val32_3, val32_4, val32_5;
7726 	u16 val16;
7727 	int i;
7728 
7729 	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7730 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7731 	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7732 	       val16, val32);
7733 
7734 	/* MAC block */
7735 	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7736 	       tr32(MAC_MODE), tr32(MAC_STATUS));
7737 	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7738 	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7739 	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7740 	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7741 	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7742 	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7743 
7744 	/* Send data initiator control block */
7745 	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7746 	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7747 	printk("       SNDDATAI_STATSCTRL[%08x]\n",
7748 	       tr32(SNDDATAI_STATSCTRL));
7749 
7750 	/* Send data completion control block */
7751 	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7752 
7753 	/* Send BD ring selector block */
7754 	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7755 	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7756 
7757 	/* Send BD initiator control block */
7758 	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7759 	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7760 
7761 	/* Send BD completion control block */
7762 	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7763 
7764 	/* Receive list placement control block */
7765 	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7766 	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7767 	printk("       RCVLPC_STATSCTRL[%08x]\n",
7768 	       tr32(RCVLPC_STATSCTRL));
7769 
7770 	/* Receive data and receive BD initiator control block */
7771 	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7772 	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7773 
7774 	/* Receive data completion control block */
7775 	printk("DEBUG: RCVDCC_MODE[%08x]\n",
7776 	       tr32(RCVDCC_MODE));
7777 
7778 	/* Receive BD initiator control block */
7779 	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7780 	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7781 
7782 	/* Receive BD completion control block */
7783 	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7784 	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7785 
7786 	/* Receive list selector control block */
7787 	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7788 	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7789 
7790 	/* Mbuf cluster free block */
7791 	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7792 	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7793 
7794 	/* Host coalescing control block */
7795 	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7796 	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7797 	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7798 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7799 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7800 	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7801 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7802 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7803 	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7804 	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7805 	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7806 	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7807 
7808 	/* Memory arbiter control block */
7809 	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7810 	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7811 
7812 	/* Buffer manager control block */
7813 	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7814 	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7815 	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7816 	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7817 	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7818 	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7819 	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7820 	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7821 
7822 	/* Read DMA control block */
7823 	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7824 	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7825 
7826 	/* Write DMA control block */
7827 	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7828 	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7829 
7830 	/* DMA completion block */
7831 	printk("DEBUG: DMAC_MODE[%08x]\n",
7832 	       tr32(DMAC_MODE));
7833 
7834 	/* GRC block */
7835 	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7836 	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7837 	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7838 	       tr32(GRC_LOCAL_CTRL));
7839 
7840 	/* TG3_BDINFOs */
7841 	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7842 	       tr32(RCVDBDI_JUMBO_BD + 0x0),
7843 	       tr32(RCVDBDI_JUMBO_BD + 0x4),
7844 	       tr32(RCVDBDI_JUMBO_BD + 0x8),
7845 	       tr32(RCVDBDI_JUMBO_BD + 0xc));
7846 	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7847 	       tr32(RCVDBDI_STD_BD + 0x0),
7848 	       tr32(RCVDBDI_STD_BD + 0x4),
7849 	       tr32(RCVDBDI_STD_BD + 0x8),
7850 	       tr32(RCVDBDI_STD_BD + 0xc));
7851 	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7852 	       tr32(RCVDBDI_MINI_BD + 0x0),
7853 	       tr32(RCVDBDI_MINI_BD + 0x4),
7854 	       tr32(RCVDBDI_MINI_BD + 0x8),
7855 	       tr32(RCVDBDI_MINI_BD + 0xc));
7856 
7857 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7858 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7859 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7860 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7861 	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7862 	       val32, val32_2, val32_3, val32_4);
7863 
7864 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7865 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7866 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7867 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7868 	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7869 	       val32, val32_2, val32_3, val32_4);
7870 
7871 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7872 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7873 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7874 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7875 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7876 	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7877 	       val32, val32_2, val32_3, val32_4, val32_5);
7878 
7879 	/* SW status block */
7880 	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7881 	       tp->hw_status->status,
7882 	       tp->hw_status->status_tag,
7883 	       tp->hw_status->rx_jumbo_consumer,
7884 	       tp->hw_status->rx_consumer,
7885 	       tp->hw_status->rx_mini_consumer,
7886 	       tp->hw_status->idx[0].rx_producer,
7887 	       tp->hw_status->idx[0].tx_consumer);
7888 
7889 	/* SW statistics block */
7890 	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7891 	       ((u32 *)tp->hw_stats)[0],
7892 	       ((u32 *)tp->hw_stats)[1],
7893 	       ((u32 *)tp->hw_stats)[2],
7894 	       ((u32 *)tp->hw_stats)[3]);
7895 
7896 	/* Mailboxes */
7897 	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7898 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7899 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7900 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7901 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7902 
7903 	/* NIC side send descriptors. */
7904 	for (i = 0; i < 6; i++) {
7905 		unsigned long txd;
7906 
7907 		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7908 			+ (i * sizeof(struct tg3_tx_buffer_desc));
7909 		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7910 		       i,
7911 		       readl(txd + 0x0), readl(txd + 0x4),
7912 		       readl(txd + 0x8), readl(txd + 0xc));
7913 	}
7914 
7915 	/* NIC side RX descriptors. */
7916 	for (i = 0; i < 6; i++) {
7917 		unsigned long rxd;
7918 
7919 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7920 			+ (i * sizeof(struct tg3_rx_buffer_desc));
7921 		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7922 		       i,
7923 		       readl(rxd + 0x0), readl(rxd + 0x4),
7924 		       readl(rxd + 0x8), readl(rxd + 0xc));
7925 		rxd += (4 * sizeof(u32));
7926 		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7927 		       i,
7928 		       readl(rxd + 0x0), readl(rxd + 0x4),
7929 		       readl(rxd + 0x8), readl(rxd + 0xc));
7930 	}
7931 
7932 	for (i = 0; i < 6; i++) {
7933 		unsigned long rxd;
7934 
7935 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7936 			+ (i * sizeof(struct tg3_rx_buffer_desc));
7937 		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7938 		       i,
7939 		       readl(rxd + 0x0), readl(rxd + 0x4),
7940 		       readl(rxd + 0x8), readl(rxd + 0xc));
7941 		rxd += (4 * sizeof(u32));
7942 		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7943 		       i,
7944 		       readl(rxd + 0x0), readl(rxd + 0x4),
7945 		       readl(rxd + 0x8), readl(rxd + 0xc));
7946 	}
7947 }
7948 #endif
7949 
7950 static struct net_device_stats *tg3_get_stats(struct net_device *);
7951 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7952 
7953 static int tg3_close(struct net_device *dev)
7954 {
7955 	struct tg3 *tp = netdev_priv(dev);
7956 
7957 	napi_disable(&tp->napi);
7958 	cancel_work_sync(&tp->reset_task);
7959 
7960 	netif_stop_queue(dev);
7961 
7962 	del_timer_sync(&tp->timer);
7963 
7964 	tg3_full_lock(tp, 1);
7965 #if 0
7966 	tg3_dump_state(tp);
7967 #endif
7968 
7969 	tg3_disable_ints(tp);
7970 
7971 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7972 	tg3_free_rings(tp);
7973 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7974 
7975 	tg3_full_unlock(tp);
7976 
7977 	free_irq(tp->pdev->irq, dev);
7978 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7979 		pci_disable_msi(tp->pdev);
7980 		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7981 	}
7982 
7983 	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7984 	       sizeof(tp->net_stats_prev));
7985 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
7986 	       sizeof(tp->estats_prev));
7987 
7988 	tg3_free_consistent(tp);
7989 
7990 	tg3_set_power_state(tp, PCI_D3hot);
7991 
7992 	netif_carrier_off(tp->dev);
7993 
7994 	return 0;
7995 }
7996 
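/* Fold a 64-bit hardware counter into an unsigned long.  On 32-bit
 * hosts this deliberately truncates to the low word, matching the
 * width of the fields in struct net_device_stats there.
 */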
7997 static inline unsigned long get_stat64(tg3_stat64_t *val)
7998 {
7999 	unsigned long ret;
8000 
8001 #if (BITS_PER_LONG == 32)
8002 	ret = val->low;
8003 #else
8004 	ret = ((u64)val->high << 32) | ((u64)val->low);
8005 #endif
8006 	return ret;
8007 }
8008 
8009 static inline u64 get_estat64(tg3_stat64_t *val)
8010 {
8011 	return ((u64)val->high << 32) | ((u64)val->low);
8012 }
8013 
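/* 5700/5701 copper devices keep their CRC error count in the PHY
 * rather than in the MAC statistics block: MII_TG3_TEST1_CRC_EN
 * exposes an apparently clear-on-read counter at PHY register 0x14
 * (the same sequence tg3_reset_hw uses to zero it), hence the
 * accumulation into tp->phy_crc_errors.
 */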
8014 static unsigned long calc_crc_errors(struct tg3 *tp)
8015 {
8016 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
8017 
8018 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8019 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8020 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8021 		u32 val;
8022 
8023 		spin_lock_bh(&tp->lock);
8024 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8025 			tg3_writephy(tp, MII_TG3_TEST1,
8026 				     val | MII_TG3_TEST1_CRC_EN);
8027 			tg3_readphy(tp, 0x14, &val);
8028 		} else
8029 			val = 0;
8030 		spin_unlock_bh(&tp->lock);
8031 
8032 		tp->phy_crc_errors += val;
8033 
8034 		return tp->phy_crc_errors;
8035 	}
8036 
8037 	return get_stat64(&hw_stats->rx_fcs_errors);
8038 }
8039 
8040 #define ESTAT_ADD(member) \
8041 	estats->member =	old_estats->member + \
8042 				get_estat64(&hw_stats->member)
8043 
8044 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8045 {
8046 	struct tg3_ethtool_stats *estats = &tp->estats;
8047 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8048 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
8049 
8050 	if (!hw_stats)
8051 		return old_estats;
8052 
8053 	ESTAT_ADD(rx_octets);
8054 	ESTAT_ADD(rx_fragments);
8055 	ESTAT_ADD(rx_ucast_packets);
8056 	ESTAT_ADD(rx_mcast_packets);
8057 	ESTAT_ADD(rx_bcast_packets);
8058 	ESTAT_ADD(rx_fcs_errors);
8059 	ESTAT_ADD(rx_align_errors);
8060 	ESTAT_ADD(rx_xon_pause_rcvd);
8061 	ESTAT_ADD(rx_xoff_pause_rcvd);
8062 	ESTAT_ADD(rx_mac_ctrl_rcvd);
8063 	ESTAT_ADD(rx_xoff_entered);
8064 	ESTAT_ADD(rx_frame_too_long_errors);
8065 	ESTAT_ADD(rx_jabbers);
8066 	ESTAT_ADD(rx_undersize_packets);
8067 	ESTAT_ADD(rx_in_length_errors);
8068 	ESTAT_ADD(rx_out_length_errors);
8069 	ESTAT_ADD(rx_64_or_less_octet_packets);
8070 	ESTAT_ADD(rx_65_to_127_octet_packets);
8071 	ESTAT_ADD(rx_128_to_255_octet_packets);
8072 	ESTAT_ADD(rx_256_to_511_octet_packets);
8073 	ESTAT_ADD(rx_512_to_1023_octet_packets);
8074 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
8075 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
8076 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
8077 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
8078 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
8079 
8080 	ESTAT_ADD(tx_octets);
8081 	ESTAT_ADD(tx_collisions);
8082 	ESTAT_ADD(tx_xon_sent);
8083 	ESTAT_ADD(tx_xoff_sent);
8084 	ESTAT_ADD(tx_flow_control);
8085 	ESTAT_ADD(tx_mac_errors);
8086 	ESTAT_ADD(tx_single_collisions);
8087 	ESTAT_ADD(tx_mult_collisions);
8088 	ESTAT_ADD(tx_deferred);
8089 	ESTAT_ADD(tx_excessive_collisions);
8090 	ESTAT_ADD(tx_late_collisions);
8091 	ESTAT_ADD(tx_collide_2times);
8092 	ESTAT_ADD(tx_collide_3times);
8093 	ESTAT_ADD(tx_collide_4times);
8094 	ESTAT_ADD(tx_collide_5times);
8095 	ESTAT_ADD(tx_collide_6times);
8096 	ESTAT_ADD(tx_collide_7times);
8097 	ESTAT_ADD(tx_collide_8times);
8098 	ESTAT_ADD(tx_collide_9times);
8099 	ESTAT_ADD(tx_collide_10times);
8100 	ESTAT_ADD(tx_collide_11times);
8101 	ESTAT_ADD(tx_collide_12times);
8102 	ESTAT_ADD(tx_collide_13times);
8103 	ESTAT_ADD(tx_collide_14times);
8104 	ESTAT_ADD(tx_collide_15times);
8105 	ESTAT_ADD(tx_ucast_packets);
8106 	ESTAT_ADD(tx_mcast_packets);
8107 	ESTAT_ADD(tx_bcast_packets);
8108 	ESTAT_ADD(tx_carrier_sense_errors);
8109 	ESTAT_ADD(tx_discards);
8110 	ESTAT_ADD(tx_errors);
8111 
8112 	ESTAT_ADD(dma_writeq_full);
8113 	ESTAT_ADD(dma_write_prioq_full);
8114 	ESTAT_ADD(rxbds_empty);
8115 	ESTAT_ADD(rx_discards);
8116 	ESTAT_ADD(rx_errors);
8117 	ESTAT_ADD(rx_threshold_hit);
8118 
8119 	ESTAT_ADD(dma_readq_full);
8120 	ESTAT_ADD(dma_read_prioq_full);
8121 	ESTAT_ADD(tx_comp_queue_full);
8122 
8123 	ESTAT_ADD(ring_set_send_prod_index);
8124 	ESTAT_ADD(ring_status_update);
8125 	ESTAT_ADD(nic_irqs);
8126 	ESTAT_ADD(nic_avoided_irqs);
8127 	ESTAT_ADD(nic_tx_threshold_hit);
8128 
8129 	return estats;
8130 }
8131 
8132 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8133 {
8134 	struct tg3 *tp = netdev_priv(dev);
8135 	struct net_device_stats *stats = &tp->net_stats;
8136 	struct net_device_stats *old_stats = &tp->net_stats_prev;
8137 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
8138 
8139 	if (!hw_stats)
8140 		return old_stats;
8141 
8142 	stats->rx_packets = old_stats->rx_packets +
8143 		get_stat64(&hw_stats->rx_ucast_packets) +
8144 		get_stat64(&hw_stats->rx_mcast_packets) +
8145 		get_stat64(&hw_stats->rx_bcast_packets);
8146 
8147 	stats->tx_packets = old_stats->tx_packets +
8148 		get_stat64(&hw_stats->tx_ucast_packets) +
8149 		get_stat64(&hw_stats->tx_mcast_packets) +
8150 		get_stat64(&hw_stats->tx_bcast_packets);
8151 
8152 	stats->rx_bytes = old_stats->rx_bytes +
8153 		get_stat64(&hw_stats->rx_octets);
8154 	stats->tx_bytes = old_stats->tx_bytes +
8155 		get_stat64(&hw_stats->tx_octets);
8156 
8157 	stats->rx_errors = old_stats->rx_errors +
8158 		get_stat64(&hw_stats->rx_errors);
8159 	stats->tx_errors = old_stats->tx_errors +
8160 		get_stat64(&hw_stats->tx_errors) +
8161 		get_stat64(&hw_stats->tx_mac_errors) +
8162 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
8163 		get_stat64(&hw_stats->tx_discards);
8164 
8165 	stats->multicast = old_stats->multicast +
8166 		get_stat64(&hw_stats->rx_mcast_packets);
8167 	stats->collisions = old_stats->collisions +
8168 		get_stat64(&hw_stats->tx_collisions);
8169 
8170 	stats->rx_length_errors = old_stats->rx_length_errors +
8171 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
8172 		get_stat64(&hw_stats->rx_undersize_packets);
8173 
8174 	stats->rx_over_errors = old_stats->rx_over_errors +
8175 		get_stat64(&hw_stats->rxbds_empty);
8176 	stats->rx_frame_errors = old_stats->rx_frame_errors +
8177 		get_stat64(&hw_stats->rx_align_errors);
8178 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8179 		get_stat64(&hw_stats->tx_discards);
8180 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8181 		get_stat64(&hw_stats->tx_carrier_sense_errors);
8182 
8183 	stats->rx_crc_errors = old_stats->rx_crc_errors +
8184 		calc_crc_errors(tp);
8185 
8186 	stats->rx_missed_errors = old_stats->rx_missed_errors +
8187 		get_stat64(&hw_stats->rx_discards);
8188 
8189 	return stats;
8190 }
8191 
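/* Bit-serial, bit-reflected IEEE 802.3 CRC-32 (polynomial
 * 0xedb88320).  __tg3_set_rx_mode() uses it to pick a bit in the
 * 128-bit multicast hash filter.
 */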
8192 static inline u32 calc_crc(unsigned char *buf, int len)
8193 {
8194 	u32 reg;
8195 	u32 tmp;
8196 	int j, k;
8197 
8198 	reg = 0xffffffff;
8199 
8200 	for (j = 0; j < len; j++) {
8201 		reg ^= buf[j];
8202 
8203 		for (k = 0; k < 8; k++) {
8204 			tmp = reg & 0x01;
8205 
8206 			reg >>= 1;
8207 
8208 			if (tmp) {
8209 				reg ^= 0xedb88320;
8210 			}
8211 		}
8212 	}
8213 
8214 	return ~reg;
8215 }
8216 
8217 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8218 {
8219 	/* accept or reject all multicast frames */
8220 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8221 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8222 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8223 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8224 }
8225 
8226 static void __tg3_set_rx_mode(struct net_device *dev)
8227 {
8228 	struct tg3 *tp = netdev_priv(dev);
8229 	u32 rx_mode;
8230 
8231 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8232 				  RX_MODE_KEEP_VLAN_TAG);
8233 
8234 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8235 	 * flag clear.
8236 	 */
8237 #if TG3_VLAN_TAG_USED
8238 	if (!tp->vlgrp &&
8239 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8240 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8241 #else
8242 	/* By definition, VLAN is always disabled in this
8243 	 * case.
8244 	 */
8245 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8246 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8247 #endif
8248 
8249 	if (dev->flags & IFF_PROMISC) {
8250 		/* Promiscuous mode. */
8251 		rx_mode |= RX_MODE_PROMISC;
8252 	} else if (dev->flags & IFF_ALLMULTI) {
8253 		/* Accept all multicast. */
8254 		tg3_set_multi(tp, 1);
8255 	} else if (dev->mc_count < 1) {
8256 		/* Reject all multicast. */
8257 		tg3_set_multi(tp, 0);
8258 	} else {
8259 		/* Accept one or more multicast(s). */
8260 		struct dev_mc_list *mclist;
8261 		unsigned int i;
8262 		u32 mc_filter[4] = { 0, };
8263 		u32 regidx;
8264 		u32 bit;
8265 		u32 crc;
8266 
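		/* Hash each address into one of 128 filter bits: the low
		 * seven bits of the inverted CRC select the bit, bits 6:5
		 * pick one of the four 32-bit MAC_HASH registers and bits
		 * 4:0 the position within it.
		 */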
8267 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8268 		     i++, mclist = mclist->next) {
8269 
8270 			crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8271 			bit = ~crc & 0x7f;
8272 			regidx = (bit & 0x60) >> 5;
8273 			bit &= 0x1f;
8274 			mc_filter[regidx] |= (1 << bit);
8275 		}
8276 
8277 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8278 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8279 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8280 		tw32(MAC_HASH_REG_3, mc_filter[3]);
8281 	}
8282 
8283 	if (rx_mode != tp->rx_mode) {
8284 		tp->rx_mode = rx_mode;
8285 		tw32_f(MAC_RX_MODE, rx_mode);
8286 		udelay(10);
8287 	}
8288 }
8289 
8290 static void tg3_set_rx_mode(struct net_device *dev)
8291 {
8292 	struct tg3 *tp = netdev_priv(dev);
8293 
8294 	if (!netif_running(dev))
8295 		return;
8296 
8297 	tg3_full_lock(tp, 0);
8298 	__tg3_set_rx_mode(dev);
8299 	tg3_full_unlock(tp);
8300 }
8301 
8302 #define TG3_REGDUMP_LEN		(32 * 1024)
8303 
8304 static int tg3_get_regs_len(struct net_device *dev)
8305 {
8306 	return TG3_REGDUMP_LEN;
8307 }
8308 
8309 static void tg3_get_regs(struct net_device *dev,
8310 		struct ethtool_regs *regs, void *_p)
8311 {
8312 	u32 *p = _p;
8313 	struct tg3 *tp = netdev_priv(dev);
8314 	u8 *orig_p = _p;
8315 	int i;
8316 
8317 	regs->version = 0;
8318 
8319 	memset(p, 0, TG3_REGDUMP_LEN);
8320 
8321 	if (tp->link_config.phy_is_low_power)
8322 		return;
8323 
8324 	tg3_full_lock(tp, 0);
8325 
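	/* The dump mirrors the register file: p is re-seeked to
	 * orig_p + offset for every range, so each value lands at its
	 * own register offset in the output buffer and the gaps between
	 * ranges stay zero from the memset above.
	 */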
8326 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
8327 #define GET_REG32_LOOP(base,len)		\
8328 do {	p = (u32 *)(orig_p + (base));		\
8329 	for (i = 0; i < len; i += 4)		\
8330 		__GET_REG32((base) + i);	\
8331 } while (0)
8332 #define GET_REG32_1(reg)			\
8333 do {	p = (u32 *)(orig_p + (reg));		\
8334 	__GET_REG32((reg));			\
8335 } while (0)
8336 
8337 	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8338 	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8339 	GET_REG32_LOOP(MAC_MODE, 0x4f0);
8340 	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8341 	GET_REG32_1(SNDDATAC_MODE);
8342 	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8343 	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8344 	GET_REG32_1(SNDBDC_MODE);
8345 	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8346 	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8347 	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8348 	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8349 	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8350 	GET_REG32_1(RCVDCC_MODE);
8351 	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8352 	GET_REG32_LOOP(RCVCC_MODE, 0x14);
8353 	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8354 	GET_REG32_1(MBFREE_MODE);
8355 	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8356 	GET_REG32_LOOP(MEMARB_MODE, 0x10);
8357 	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8358 	GET_REG32_LOOP(RDMAC_MODE, 0x08);
8359 	GET_REG32_LOOP(WDMAC_MODE, 0x08);
8360 	GET_REG32_1(RX_CPU_MODE);
8361 	GET_REG32_1(RX_CPU_STATE);
8362 	GET_REG32_1(RX_CPU_PGMCTR);
8363 	GET_REG32_1(RX_CPU_HWBKPT);
8364 	GET_REG32_1(TX_CPU_MODE);
8365 	GET_REG32_1(TX_CPU_STATE);
8366 	GET_REG32_1(TX_CPU_PGMCTR);
8367 	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8368 	GET_REG32_LOOP(FTQ_RESET, 0x120);
8369 	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8370 	GET_REG32_1(DMAC_MODE);
8371 	GET_REG32_LOOP(GRC_MODE, 0x4c);
8372 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
8373 		GET_REG32_LOOP(NVRAM_CMD, 0x24);
8374 
8375 #undef __GET_REG32
8376 #undef GET_REG32_LOOP
8377 #undef GET_REG32_1
8378 
8379 	tg3_full_unlock(tp);
8380 }
8381 
8382 static int tg3_get_eeprom_len(struct net_device *dev)
8383 {
8384 	struct tg3 *tp = netdev_priv(dev);
8385 
8386 	return tp->nvram_size;
8387 }
8388 
8389 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8390 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8391 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8392 
8393 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8394 {
8395 	struct tg3 *tp = netdev_priv(dev);
8396 	int ret;
8397 	u8  *pd;
8398 	u32 i, offset, len, b_offset, b_count;
8399 	__le32 val;
8400 
8401 	if (tp->link_config.phy_is_low_power)
8402 		return -EAGAIN;
8403 
8404 	offset = eeprom->offset;
8405 	len = eeprom->len;
8406 	eeprom->len = 0;
8407 
8408 	eeprom->magic = TG3_EEPROM_MAGIC;
8409 
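	/* NVRAM is read in aligned 4 byte words, so an arbitrary
	 * (offset, len) request is split into an unaligned head, whole
	 * words, and an unaligned tail; e.g. offset=1 len=2 reads the
	 * word at offset 0 and copies bytes 1..2 out of it.
	 */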
8410 	if (offset & 3) {
8411 		/* adjustments to start on required 4 byte boundary */
8412 		b_offset = offset & 3;
8413 		b_count = 4 - b_offset;
8414 		if (b_count > len) {
8415 			/* i.e. offset=1 len=2 */
8416 			b_count = len;
8417 		}
8418 		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8419 		if (ret)
8420 			return ret;
8421 		memcpy(data, ((char*)&val) + b_offset, b_count);
8422 		len -= b_count;
8423 		offset += b_count;
8424 		eeprom->len += b_count;
8425 	}
8426 
8427 	/* read bytes up to the last 4 byte boundary */
8428 	pd = &data[eeprom->len];
8429 	for (i = 0; i < (len - (len & 3)); i += 4) {
8430 		ret = tg3_nvram_read_le(tp, offset + i, &val);
8431 		if (ret) {
8432 			eeprom->len += i;
8433 			return ret;
8434 		}
8435 		memcpy(pd + i, &val, 4);
8436 	}
8437 	eeprom->len += i;
8438 
8439 	if (len & 3) {
8440 		/* read last bytes not ending on 4 byte boundary */
8441 		pd = &data[eeprom->len];
8442 		b_count = len & 3;
8443 		b_offset = offset + len - b_count;
8444 		ret = tg3_nvram_read_le(tp, b_offset, &val);
8445 		if (ret)
8446 			return ret;
8447 		memcpy(pd, &val, b_count);
8448 		eeprom->len += b_count;
8449 	}
8450 	return 0;
8451 }
8452 
8453 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8454 
8455 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8456 {
8457 	struct tg3 *tp = netdev_priv(dev);
8458 	int ret;
8459 	u32 offset, len, b_offset, odd_len;
8460 	u8 *buf;
8461 	__le32 start, end;
8462 
8463 	if (tp->link_config.phy_is_low_power)
8464 		return -EAGAIN;
8465 
8466 	if (eeprom->magic != TG3_EEPROM_MAGIC)
8467 		return -EINVAL;
8468 
8469 	offset = eeprom->offset;
8470 	len = eeprom->len;
8471 
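	/* NVRAM writes are word-based too: widen the request to 4 byte
	 * boundaries and preserve the neighbouring bytes by reading the
	 * first and last words before writing them back.
	 */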
8472 	if ((b_offset = (offset & 3))) {
8473 		/* adjustments to start on required 4 byte boundary */
8474 		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8475 		if (ret)
8476 			return ret;
8477 		len += b_offset;
8478 		offset &= ~3;
8479 		if (len < 4)
8480 			len = 4;
8481 	}
8482 
8483 	odd_len = 0;
8484 	if (len & 3) {
8485 		/* adjustments to end on required 4 byte boundary */
8486 		odd_len = 1;
8487 		len = (len + 3) & ~3;
8488 		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8489 		if (ret)
8490 			return ret;
8491 	}
8492 
8493 	buf = data;
8494 	if (b_offset || odd_len) {
8495 		buf = kmalloc(len, GFP_KERNEL);
8496 		if (!buf)
8497 			return -ENOMEM;
8498 		if (b_offset)
8499 			memcpy(buf, &start, 4);
8500 		if (odd_len)
8501 			memcpy(buf+len-4, &end, 4);
8502 		memcpy(buf + b_offset, data, eeprom->len);
8503 	}
8504 
8505 	ret = tg3_nvram_write_block(tp, offset, len, buf);
8506 
8507 	if (buf != data)
8508 		kfree(buf);
8509 
8510 	return ret;
8511 }
8512 
8513 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8514 {
8515 	struct tg3 *tp = netdev_priv(dev);
8516 
8517 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8518 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8519 			return -EAGAIN;
8520 		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8521 	}
8522 
8523 	cmd->supported = (SUPPORTED_Autoneg);
8524 
8525 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8526 		cmd->supported |= (SUPPORTED_1000baseT_Half |
8527 				   SUPPORTED_1000baseT_Full);
8528 
8529 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8530 		cmd->supported |= (SUPPORTED_100baseT_Half |
8531 				  SUPPORTED_100baseT_Full |
8532 				  SUPPORTED_10baseT_Half |
8533 				  SUPPORTED_10baseT_Full |
8534 				  SUPPORTED_TP);
8535 		cmd->port = PORT_TP;
8536 	} else {
8537 		cmd->supported |= SUPPORTED_FIBRE;
8538 		cmd->port = PORT_FIBRE;
8539 	}
8540 
8541 	cmd->advertising = tp->link_config.advertising;
8542 	if (netif_running(dev)) {
8543 		cmd->speed = tp->link_config.active_speed;
8544 		cmd->duplex = tp->link_config.active_duplex;
8545 	}
8546 	cmd->phy_address = PHY_ADDR;
8547 	cmd->transceiver = 0;
8548 	cmd->autoneg = tp->link_config.autoneg;
8549 	cmd->maxtxpkt = 0;
8550 	cmd->maxrxpkt = 0;
8551 	return 0;
8552 }
8553 
8554 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8555 {
8556 	struct tg3 *tp = netdev_priv(dev);
8557 
8558 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8559 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8560 			return -EAGAIN;
8561 		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8562 	}
8563 
8564 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8565 		/* These are the only advertisement bits allowed.  */
8566 		if (cmd->autoneg == AUTONEG_ENABLE &&
8567 		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8568 					  ADVERTISED_1000baseT_Full |
8569 					  ADVERTISED_Autoneg |
8570 					  ADVERTISED_FIBRE)))
8571 			return -EINVAL;
8572 		/* Fiber can only do SPEED_1000.  */
8573 		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8574 			 (cmd->speed != SPEED_1000))
8575 			return -EINVAL;
8576 	/* Copper cannot force SPEED_1000.  */
8577 	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8578 		   (cmd->speed == SPEED_1000))
8579 		return -EINVAL;
8580 	else if ((cmd->speed == SPEED_1000) &&
8581 		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8582 		return -EINVAL;
8583 
8584 	tg3_full_lock(tp, 0);
8585 
8586 	tp->link_config.autoneg = cmd->autoneg;
8587 	if (cmd->autoneg == AUTONEG_ENABLE) {
8588 		tp->link_config.advertising = (cmd->advertising |
8589 					      ADVERTISED_Autoneg);
8590 		tp->link_config.speed = SPEED_INVALID;
8591 		tp->link_config.duplex = DUPLEX_INVALID;
8592 	} else {
8593 		tp->link_config.advertising = 0;
8594 		tp->link_config.speed = cmd->speed;
8595 		tp->link_config.duplex = cmd->duplex;
8596 	}
8597 
8598 	tp->link_config.orig_speed = tp->link_config.speed;
8599 	tp->link_config.orig_duplex = tp->link_config.duplex;
8600 	tp->link_config.orig_autoneg = tp->link_config.autoneg;
8601 
8602 	if (netif_running(dev))
8603 		tg3_setup_phy(tp, 1);
8604 
8605 	tg3_full_unlock(tp);
8606 
8607 	return 0;
8608 }
8609 
8610 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8611 {
8612 	struct tg3 *tp = netdev_priv(dev);
8613 
8614 	strcpy(info->driver, DRV_MODULE_NAME);
8615 	strcpy(info->version, DRV_MODULE_VERSION);
8616 	strcpy(info->fw_version, tp->fw_ver);
8617 	strcpy(info->bus_info, pci_name(tp->pdev));
8618 }
8619 
8620 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8621 {
8622 	struct tg3 *tp = netdev_priv(dev);
8623 
8624 	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
8625 	    device_can_wakeup(&tp->pdev->dev))
8626 		wol->supported = WAKE_MAGIC;
8627 	else
8628 		wol->supported = 0;
8629 	wol->wolopts = 0;
8630 	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
8631 	    device_can_wakeup(&tp->pdev->dev))
8632 		wol->wolopts = WAKE_MAGIC;
8633 	memset(&wol->sopass, 0, sizeof(wol->sopass));
8634 }
8635 
8636 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8637 {
8638 	struct tg3 *tp = netdev_priv(dev);
8639 	struct device *dp = &tp->pdev->dev;
8640 
8641 	if (wol->wolopts & ~WAKE_MAGIC)
8642 		return -EINVAL;
8643 	if ((wol->wolopts & WAKE_MAGIC) &&
8644 	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
8645 		return -EINVAL;
8646 
8647 	spin_lock_bh(&tp->lock);
8648 	if (wol->wolopts & WAKE_MAGIC) {
8649 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8650 		device_set_wakeup_enable(dp, true);
8651 	} else {
8652 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8653 		device_set_wakeup_enable(dp, false);
8654 	}
8655 	spin_unlock_bh(&tp->lock);
8656 
8657 	return 0;
8658 }
8659 
8660 static u32 tg3_get_msglevel(struct net_device *dev)
8661 {
8662 	struct tg3 *tp = netdev_priv(dev);
8663 	return tp->msg_enable;
8664 }
8665 
8666 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8667 {
8668 	struct tg3 *tp = netdev_priv(dev);
8669 	tp->msg_enable = value;
8670 }
8671 
8672 static int tg3_set_tso(struct net_device *dev, u32 value)
8673 {
8674 	struct tg3 *tp = netdev_priv(dev);
8675 
8676 	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8677 		if (value)
8678 			return -EINVAL;
8679 		return 0;
8680 	}
8681 	if ((dev->features & NETIF_F_IPV6_CSUM) &&
8682 	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
8683 		if (value) {
8684 			dev->features |= NETIF_F_TSO6;
8685 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8686 			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
8687 			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
8688 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8689 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8690 				dev->features |= NETIF_F_TSO_ECN;
8691 		} else
8692 			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8693 	}
8694 	return ethtool_op_set_tso(dev, value);
8695 }
8696 
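/* ethtool ->nway_reset handler.  Restarts autonegotiation; fails with
 * -EAGAIN if the interface is down and with -EINVAL on SERDES parts or
 * when autoneg is disabled in BMCR (unless parallel detection is in
 * use).
 */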
8697 static int tg3_nway_reset(struct net_device *dev)
8698 {
8699 	struct tg3 *tp = netdev_priv(dev);
8700 	int r;
8701 
8702 	if (!netif_running(dev))
8703 		return -EAGAIN;
8704 
8705 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8706 		return -EINVAL;
8707 
8708 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8709 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8710 			return -EAGAIN;
8711 		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
8712 	} else {
8713 		u32 bmcr;
8714 
8715 		spin_lock_bh(&tp->lock);
8716 		r = -EINVAL;
8717 		tg3_readphy(tp, MII_BMCR, &bmcr);	/* read twice; the first result is discarded */
8718 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8719 		    ((bmcr & BMCR_ANENABLE) ||
8720 		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8721 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8722 						   BMCR_ANENABLE);
8723 			r = 0;
8724 		}
8725 		spin_unlock_bh(&tp->lock);
8726 	}
8727 
8728 	return r;
8729 }
8730 
8731 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8732 {
8733 	struct tg3 *tp = netdev_priv(dev);
8734 
8735 	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8736 	ering->rx_mini_max_pending = 0;
8737 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8738 		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8739 	else
8740 		ering->rx_jumbo_max_pending = 0;
8741 
8742 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8743 
8744 	ering->rx_pending = tp->rx_pending;
8745 	ering->rx_mini_pending = 0;
8746 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8747 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8748 	else
8749 		ering->rx_jumbo_pending = 0;
8750 
8751 	ering->tx_pending = tp->tx_pending;
8752 }
8753 
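/* ethtool ->set_ringparam handler.  The requested counts must fit the
 * fixed hardware ring sizes and leave headroom for a maximally
 * fragmented skb; applying them requires a chip halt and restart when
 * the interface is up.
 */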
8754 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8755 {
8756 	struct tg3 *tp = netdev_priv(dev);
8757 	int irq_sync = 0, err = 0;
8758 
8759 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8760 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8761 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8762 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
8763 	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8764 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8765 		return -EINVAL;
8766 
8767 	if (netif_running(dev)) {
8768 		tg3_phy_stop(tp);
8769 		tg3_netif_stop(tp);
8770 		irq_sync = 1;
8771 	}
8772 
8773 	tg3_full_lock(tp, irq_sync);
8774 
8775 	tp->rx_pending = ering->rx_pending;
8776 
8777 	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8778 	    tp->rx_pending > 63)
8779 		tp->rx_pending = 63;
8780 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8781 	tp->tx_pending = ering->tx_pending;
8782 
8783 	if (netif_running(dev)) {
8784 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8785 		err = tg3_restart_hw(tp, 1);
8786 		if (!err)
8787 			tg3_netif_start(tp);
8788 	}
8789 
8790 	tg3_full_unlock(tp);
8791 
8792 	if (irq_sync && !err)
8793 		tg3_phy_start(tp);
8794 
8795 	return err;
8796 }
8797 
8798 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8799 {
8800 	struct tg3 *tp = netdev_priv(dev);
8801 
8802 	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8803 
8804 	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
8805 		epause->rx_pause = 1;
8806 	else
8807 		epause->rx_pause = 0;
8808 
8809 	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
8810 		epause->tx_pause = 1;
8811 	else
8812 		epause->tx_pause = 0;
8813 }
8814 
8815 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8816 {
8817 	struct tg3 *tp = netdev_priv(dev);
8818 	int err = 0;
8819 
8820 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8821 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8822 			return -EAGAIN;
8823 
8824 		if (epause->autoneg) {
8825 			u32 newadv;
8826 			struct phy_device *phydev;
8827 
8828 			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
8829 
8830 			if (epause->rx_pause) {
8831 				if (epause->tx_pause)
8832 					newadv = ADVERTISED_Pause;
8833 				else
8834 					newadv = ADVERTISED_Pause |
8835 						 ADVERTISED_Asym_Pause;
8836 			} else if (epause->tx_pause) {
8837 				newadv = ADVERTISED_Asym_Pause;
8838 			} else
8839 				newadv = 0;
8840 
8841 			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
8842 				u32 oldadv = phydev->advertising &
8843 					     (ADVERTISED_Pause |
8844 					      ADVERTISED_Asym_Pause);
8845 				if (oldadv != newadv) {
8846 					phydev->advertising &=
8847 						~(ADVERTISED_Pause |
8848 						  ADVERTISED_Asym_Pause);
8849 					phydev->advertising |= newadv;
8850 					err = phy_start_aneg(phydev);
8851 				}
8852 			} else {
8853 				tp->link_config.advertising &=
8854 						~(ADVERTISED_Pause |
8855 						  ADVERTISED_Asym_Pause);
8856 				tp->link_config.advertising |= newadv;
8857 			}
8858 		} else {
8859 			if (epause->rx_pause)
8860 				tp->link_config.flowctrl |= FLOW_CTRL_RX;
8861 			else
8862 				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
8863 
8864 			if (epause->tx_pause)
8865 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
8866 			else
8867 				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
8868 
8869 			if (netif_running(dev))
8870 				tg3_setup_flow_control(tp, 0, 0);
8871 		}
8872 	} else {
8873 		int irq_sync = 0;
8874 
8875 		if (netif_running(dev)) {
8876 			tg3_netif_stop(tp);
8877 			irq_sync = 1;
8878 		}
8879 
8880 		tg3_full_lock(tp, irq_sync);
8881 
8882 		if (epause->autoneg)
8883 			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8884 		else
8885 			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8886 		if (epause->rx_pause)
8887 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
8888 		else
8889 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
8890 		if (epause->tx_pause)
8891 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
8892 		else
8893 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
8894 
8895 		if (netif_running(dev)) {
8896 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8897 			err = tg3_restart_hw(tp, 1);
8898 			if (!err)
8899 				tg3_netif_start(tp);
8900 		}
8901 
8902 		tg3_full_unlock(tp);
8903 	}
8904 
8905 	return err;
8906 }
8907 
8908 static u32 tg3_get_rx_csum(struct net_device *dev)
8909 {
8910 	struct tg3 *tp = netdev_priv(dev);
8911 	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8912 }
8913 
8914 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8915 {
8916 	struct tg3 *tp = netdev_priv(dev);
8917 
8918 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8919 		if (data != 0)
8920 			return -EINVAL;
8921 		return 0;
8922 	}
8923 
8924 	spin_lock_bh(&tp->lock);
8925 	if (data)
8926 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8927 	else
8928 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8929 	spin_unlock_bh(&tp->lock);
8930 
8931 	return 0;
8932 }
8933 
8934 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8935 {
8936 	struct tg3 *tp = netdev_priv(dev);
8937 
8938 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8939 		if (data != 0)
8940 			return -EINVAL;
8941 		return 0;
8942 	}
8943 
8944 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8945 		ethtool_op_set_tx_ipv6_csum(dev, data);
8946 	else
8947 		ethtool_op_set_tx_csum(dev, data);
8948 
8949 	return 0;
8950 }
8951 
8952 static int tg3_get_sset_count(struct net_device *dev, int sset)
8953 {
8954 	switch (sset) {
8955 	case ETH_SS_TEST:
8956 		return TG3_NUM_TEST;
8957 	case ETH_SS_STATS:
8958 		return TG3_NUM_STATS;
8959 	default:
8960 		return -EOPNOTSUPP;
8961 	}
8962 }
8963 
8964 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8965 {
8966 	switch (stringset) {
8967 	case ETH_SS_STATS:
8968 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8969 		break;
8970 	case ETH_SS_TEST:
8971 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8972 		break;
8973 	default:
8974 		WARN_ON(1);	/* we need a WARN() */
8975 		break;
8976 	}
8977 }
8978 
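/* ethtool ->phys_id handler.  Blinks the port LEDs for 'data' seconds
 * (practically forever when data == 0) by toggling the LED override
 * bits every half second, then restores the saved LED configuration.
 */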
8979 static int tg3_phys_id(struct net_device *dev, u32 data)
8980 {
8981 	struct tg3 *tp = netdev_priv(dev);
8982 	int i;
8983 
8984 	if (!netif_running(tp->dev))
8985 		return -EAGAIN;
8986 
8987 	if (data == 0)
8988 		data = UINT_MAX / 2;
8989 
8990 	for (i = 0; i < (data * 2); i++) {
8991 		if ((i % 2) == 0)
8992 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8993 					   LED_CTRL_1000MBPS_ON |
8994 					   LED_CTRL_100MBPS_ON |
8995 					   LED_CTRL_10MBPS_ON |
8996 					   LED_CTRL_TRAFFIC_OVERRIDE |
8997 					   LED_CTRL_TRAFFIC_BLINK |
8998 					   LED_CTRL_TRAFFIC_LED);
8999 
9000 		else
9001 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9002 					   LED_CTRL_TRAFFIC_OVERRIDE);
9003 
9004 		if (msleep_interruptible(500))
9005 			break;
9006 	}
9007 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9008 	return 0;
9009 }
9010 
9011 static void tg3_get_ethtool_stats(struct net_device *dev,
9012 				   struct ethtool_stats *estats, u64 *tmp_stats)
9013 {
9014 	struct tg3 *tp = netdev_priv(dev);
9015 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9016 }
9017 
9018 #define NVRAM_TEST_SIZE 0x100
9019 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
9020 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
9021 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
9022 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9023 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9024 
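/* NVRAM self-test.  The magic word at offset 0 selects the image format
 * and size; the image is then read into a buffer and verified with the
 * format-specific check: a byte sum for selfboot images, per-byte
 * parity bits for the hardware selfboot format, or CRCs over the
 * bootstrap and manufacturing blocks for legacy images.
 */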
9025 static int tg3_test_nvram(struct tg3 *tp)
9026 {
9027 	u32 csum, magic;
9028 	__le32 *buf;
9029 	int i, j, k, err = 0, size;
9030 
9031 	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9032 		return -EIO;
9033 
9034 	if (magic == TG3_EEPROM_MAGIC)
9035 		size = NVRAM_TEST_SIZE;
9036 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9037 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9038 		    TG3_EEPROM_SB_FORMAT_1) {
9039 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9040 			case TG3_EEPROM_SB_REVISION_0:
9041 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9042 				break;
9043 			case TG3_EEPROM_SB_REVISION_2:
9044 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9045 				break;
9046 			case TG3_EEPROM_SB_REVISION_3:
9047 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9048 				break;
9049 			default:
9050 				return 0;
9051 			}
9052 		} else
9053 			return 0;
9054 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9055 		size = NVRAM_SELFBOOT_HW_SIZE;
9056 	else
9057 		return -EIO;
9058 
9059 	buf = kmalloc(size, GFP_KERNEL);
9060 	if (buf == NULL)
9061 		return -ENOMEM;
9062 
9063 	err = -EIO;
9064 	for (i = 0, j = 0; i < size; i += 4, j++) {
9065 		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9066 			break;
9067 	}
9068 	if (i < size)
9069 		goto out;
9070 
9071 	/* Selfboot format */
9072 	magic = swab32(le32_to_cpu(buf[0]));
9073 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9074 	    TG3_EEPROM_MAGIC_FW) {
9075 		u8 *buf8 = (u8 *) buf, csum8 = 0;
9076 
9077 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9078 		    TG3_EEPROM_SB_REVISION_2) {
9079 			/* For rev 2, the csum doesn't include the MBA. */
9080 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9081 				csum8 += buf8[i];
9082 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9083 				csum8 += buf8[i];
9084 		} else {
9085 			for (i = 0; i < size; i++)
9086 				csum8 += buf8[i];
9087 		}
9088 
9089 		if (csum8 == 0) {
9090 			err = 0;
9091 			goto out;
9092 		}
9093 
9094 		err = -EIO;
9095 		goto out;
9096 	}
9097 
9098 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9099 	    TG3_EEPROM_MAGIC_HW) {
9100 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9101 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9102 		u8 *buf8 = (u8 *) buf;
9103 
9104 		/* Separate the parity bits and the data bytes.  */
9105 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9106 			if ((i == 0) || (i == 8)) {
9107 				int l;
9108 				u8 msk;
9109 
9110 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9111 					parity[k++] = buf8[i] & msk;
9112 				i++;
9113 			}
9114 			else if (i == 16) {
9115 				int l;
9116 				u8 msk;
9117 
9118 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9119 					parity[k++] = buf8[i] & msk;
9120 				i++;
9121 
9122 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9123 					parity[k++] = buf8[i] & msk;
9124 				i++;
9125 			}
9126 			data[j++] = buf8[i];
9127 		}
9128 
9129 		err = -EIO;
9130 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9131 			u8 hw8 = hweight8(data[i]);
9132 
9133 			if ((hw8 & 0x1) && parity[i])
9134 				goto out;
9135 			else if (!(hw8 & 0x1) && !parity[i])
9136 				goto out;
9137 		}
9138 		err = 0;
9139 		goto out;
9140 	}
9141 
9142 	/* Bootstrap checksum at offset 0x10 */
9143 	csum = calc_crc((unsigned char *) buf, 0x10);
9144 	if (csum != le32_to_cpu(buf[0x10/4]))
9145 		goto out;
9146 
9147 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9148 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9149 	if (csum != le32_to_cpu(buf[0xfc/4]))
9150 		goto out;
9151 
9152 	err = 0;
9153 
9154 out:
9155 	kfree(buf);
9156 	return err;
9157 }
9158 
9159 #define TG3_SERDES_TIMEOUT_SEC	2
9160 #define TG3_COPPER_TIMEOUT_SEC	6
9161 
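/* Link self-test: poll netif_carrier_ok() once a second until the link
 * comes up or the media-dependent timeout above expires.
 */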
9162 static int tg3_test_link(struct tg3 *tp)
9163 {
9164 	int i, max;
9165 
9166 	if (!netif_running(tp->dev))
9167 		return -ENODEV;
9168 
9169 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9170 		max = TG3_SERDES_TIMEOUT_SEC;
9171 	else
9172 		max = TG3_COPPER_TIMEOUT_SEC;
9173 
9174 	for (i = 0; i < max; i++) {
9175 		if (netif_carrier_ok(tp->dev))
9176 			return 0;
9177 
9178 		if (msleep_interruptible(1000))
9179 			break;
9180 	}
9181 
9182 	return -EIO;
9183 }
9184 
9185 /* Only test the commonly used registers */
9186 static int tg3_test_registers(struct tg3 *tp)
9187 {
9188 	int i, is_5705, is_5750;
9189 	u32 offset, read_mask, write_mask, val, save_val, read_val;
9190 	static struct {
9191 		u16 offset;
9192 		u16 flags;
9193 #define TG3_FL_5705	0x1
9194 #define TG3_FL_NOT_5705	0x2
9195 #define TG3_FL_NOT_5788	0x4
9196 #define TG3_FL_NOT_5750	0x8
9197 		u32 read_mask;
9198 		u32 write_mask;
9199 	} reg_tbl[] = {
9200 		/* MAC Control Registers */
9201 		{ MAC_MODE, TG3_FL_NOT_5705,
9202 			0x00000000, 0x00ef6f8c },
9203 		{ MAC_MODE, TG3_FL_5705,
9204 			0x00000000, 0x01ef6b8c },
9205 		{ MAC_STATUS, TG3_FL_NOT_5705,
9206 			0x03800107, 0x00000000 },
9207 		{ MAC_STATUS, TG3_FL_5705,
9208 			0x03800100, 0x00000000 },
9209 		{ MAC_ADDR_0_HIGH, 0x0000,
9210 			0x00000000, 0x0000ffff },
9211 		{ MAC_ADDR_0_LOW, 0x0000,
9212 			0x00000000, 0xffffffff },
9213 		{ MAC_RX_MTU_SIZE, 0x0000,
9214 			0x00000000, 0x0000ffff },
9215 		{ MAC_TX_MODE, 0x0000,
9216 			0x00000000, 0x00000070 },
9217 		{ MAC_TX_LENGTHS, 0x0000,
9218 			0x00000000, 0x00003fff },
9219 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
9220 			0x00000000, 0x000007fc },
9221 		{ MAC_RX_MODE, TG3_FL_5705,
9222 			0x00000000, 0x000007dc },
9223 		{ MAC_HASH_REG_0, 0x0000,
9224 			0x00000000, 0xffffffff },
9225 		{ MAC_HASH_REG_1, 0x0000,
9226 			0x00000000, 0xffffffff },
9227 		{ MAC_HASH_REG_2, 0x0000,
9228 			0x00000000, 0xffffffff },
9229 		{ MAC_HASH_REG_3, 0x0000,
9230 			0x00000000, 0xffffffff },
9231 
9232 		/* Receive Data and Receive BD Initiator Control Registers. */
9233 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9234 			0x00000000, 0xffffffff },
9235 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9236 			0x00000000, 0xffffffff },
9237 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9238 			0x00000000, 0x00000003 },
9239 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9240 			0x00000000, 0xffffffff },
9241 		{ RCVDBDI_STD_BD+0, 0x0000,
9242 			0x00000000, 0xffffffff },
9243 		{ RCVDBDI_STD_BD+4, 0x0000,
9244 			0x00000000, 0xffffffff },
9245 		{ RCVDBDI_STD_BD+8, 0x0000,
9246 			0x00000000, 0xffff0002 },
9247 		{ RCVDBDI_STD_BD+0xc, 0x0000,
9248 			0x00000000, 0xffffffff },
9249 
9250 		/* Receive BD Initiator Control Registers. */
9251 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9252 			0x00000000, 0xffffffff },
9253 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
9254 			0x00000000, 0x000003ff },
9255 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9256 			0x00000000, 0xffffffff },
9257 
9258 		/* Host Coalescing Control Registers. */
9259 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
9260 			0x00000000, 0x00000004 },
9261 		{ HOSTCC_MODE, TG3_FL_5705,
9262 			0x00000000, 0x000000f6 },
9263 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9264 			0x00000000, 0xffffffff },
9265 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9266 			0x00000000, 0x000003ff },
9267 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9268 			0x00000000, 0xffffffff },
9269 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9270 			0x00000000, 0x000003ff },
9271 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9272 			0x00000000, 0xffffffff },
9273 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9274 			0x00000000, 0x000000ff },
9275 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9276 			0x00000000, 0xffffffff },
9277 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9278 			0x00000000, 0x000000ff },
9279 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9280 			0x00000000, 0xffffffff },
9281 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9282 			0x00000000, 0xffffffff },
9283 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9284 			0x00000000, 0xffffffff },
9285 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9286 			0x00000000, 0x000000ff },
9287 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9288 			0x00000000, 0xffffffff },
9289 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9290 			0x00000000, 0x000000ff },
9291 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9292 			0x00000000, 0xffffffff },
9293 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9294 			0x00000000, 0xffffffff },
9295 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9296 			0x00000000, 0xffffffff },
9297 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9298 			0x00000000, 0xffffffff },
9299 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9300 			0x00000000, 0xffffffff },
9301 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9302 			0xffffffff, 0x00000000 },
9303 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9304 			0xffffffff, 0x00000000 },
9305 
9306 		/* Buffer Manager Control Registers. */
9307 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9308 			0x00000000, 0x007fff80 },
9309 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9310 			0x00000000, 0x007fffff },
9311 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9312 			0x00000000, 0x0000003f },
9313 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9314 			0x00000000, 0x000001ff },
9315 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
9316 			0x00000000, 0x000001ff },
9317 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9318 			0xffffffff, 0x00000000 },
9319 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9320 			0xffffffff, 0x00000000 },
9321 
9322 		/* Mailbox Registers */
9323 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9324 			0x00000000, 0x000001ff },
9325 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9326 			0x00000000, 0x000001ff },
9327 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9328 			0x00000000, 0x000007ff },
9329 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9330 			0x00000000, 0x000001ff },
9331 
9332 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
9333 	};
9334 
9335 	is_5705 = is_5750 = 0;
9336 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9337 		is_5705 = 1;
9338 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9339 			is_5750 = 1;
9340 	}
9341 
9342 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9343 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9344 			continue;
9345 
9346 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9347 			continue;
9348 
9349 		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9350 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
9351 			continue;
9352 
9353 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9354 			continue;
9355 
9356 		offset = (u32) reg_tbl[i].offset;
9357 		read_mask = reg_tbl[i].read_mask;
9358 		write_mask = reg_tbl[i].write_mask;
9359 
9360 		/* Save the original register content */
9361 		save_val = tr32(offset);
9362 
9363 		/* Determine the read-only value. */
9364 		read_val = save_val & read_mask;
9365 
9366 		/* Write zero to the register, then make sure the read-only bits
9367 		 * are not changed and the read/write bits are all zeros.
9368 		 */
9369 		tw32(offset, 0);
9370 
9371 		val = tr32(offset);
9372 
9373 		/* Test the read-only and read/write bits. */
9374 		if (((val & read_mask) != read_val) || (val & write_mask))
9375 			goto out;
9376 
9377 		/* Write ones to all the bits defined by RdMask and WrMask, then
9378 		 * make sure the read-only bits are not changed and the
9379 		 * read/write bits are all ones.
9380 		 */
9381 		tw32(offset, read_mask | write_mask);
9382 
9383 		val = tr32(offset);
9384 
9385 		/* Test the read-only bits. */
9386 		if ((val & read_mask) != read_val)
9387 			goto out;
9388 
9389 		/* Test the read/write bits. */
9390 		if ((val & write_mask) != write_mask)
9391 			goto out;
9392 
9393 		tw32(offset, save_val);
9394 	}
9395 
9396 	return 0;
9397 
9398 out:
9399 	if (netif_msg_hw(tp))
9400 		printk(KERN_ERR PFX "Register test failed at offset %x\n",
9401 		       offset);
9402 	tw32(offset, save_val);
9403 	return -EIO;
9404 }
9405 
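/* Write each test pattern across the given window of NIC-local memory
 * and read every word back for comparison.
 */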
9406 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9407 {
9408 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9409 	int i;
9410 	u32 j;
9411 
9412 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9413 		for (j = 0; j < len; j += 4) {
9414 			u32 val;
9415 
9416 			tg3_write_mem(tp, offset + j, test_pattern[i]);
9417 			tg3_read_mem(tp, offset + j, &val);
9418 			if (val != test_pattern[i])
9419 				return -EIO;
9420 		}
9421 	}
9422 	return 0;
9423 }
9424 
9425 static int tg3_test_memory(struct tg3 *tp)
9426 {
9427 	static struct mem_entry {
9428 		u32 offset;
9429 		u32 len;
9430 	} mem_tbl_570x[] = {
9431 		{ 0x00000000, 0x00b50},
9432 		{ 0x00002000, 0x1c000},
9433 		{ 0xffffffff, 0x00000}
9434 	}, mem_tbl_5705[] = {
9435 		{ 0x00000100, 0x0000c},
9436 		{ 0x00000200, 0x00008},
9437 		{ 0x00004000, 0x00800},
9438 		{ 0x00006000, 0x01000},
9439 		{ 0x00008000, 0x02000},
9440 		{ 0x00010000, 0x0e000},
9441 		{ 0xffffffff, 0x00000}
9442 	}, mem_tbl_5755[] = {
9443 		{ 0x00000200, 0x00008},
9444 		{ 0x00004000, 0x00800},
9445 		{ 0x00006000, 0x00800},
9446 		{ 0x00008000, 0x02000},
9447 		{ 0x00010000, 0x0c000},
9448 		{ 0xffffffff, 0x00000}
9449 	}, mem_tbl_5906[] = {
9450 		{ 0x00000200, 0x00008},
9451 		{ 0x00004000, 0x00400},
9452 		{ 0x00006000, 0x00400},
9453 		{ 0x00008000, 0x01000},
9454 		{ 0x00010000, 0x01000},
9455 		{ 0xffffffff, 0x00000}
9456 	};
9457 	struct mem_entry *mem_tbl;
9458 	int err = 0;
9459 	int i;
9460 
9461 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9462 		mem_tbl = mem_tbl_5755;
9463 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9464 		mem_tbl = mem_tbl_5906;
9465 	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9466 		mem_tbl = mem_tbl_5705;
9467 	else
9468 		mem_tbl = mem_tbl_570x;
9469 
9470 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9471 		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9472 		    mem_tbl[i].len)) != 0)
9473 			break;
9474 	}
9475 
9476 	return err;
9477 }
9478 
9479 #define TG3_MAC_LOOPBACK	0
9480 #define TG3_PHY_LOOPBACK	1
9481 
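/* Loopback self-test.  Puts the chip in MAC-internal or PHY loopback,
 * transmits a single 1514-byte frame with a known byte pattern, and
 * then checks that exactly one frame of the same length and payload
 * arrives on the standard RX ring.
 */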
9482 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9483 {
9484 	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9485 	u32 desc_idx;
9486 	struct sk_buff *skb, *rx_skb;
9487 	u8 *tx_data;
9488 	dma_addr_t map;
9489 	int num_pkts, tx_len, rx_len, i, err;
9490 	struct tg3_rx_buffer_desc *desc;
9491 
9492 	if (loopback_mode == TG3_MAC_LOOPBACK) {
9493 		/* HW errata - mac loopback fails in some cases on 5780.
9494 		 * Normal traffic and PHY loopback are not affected by
9495 		 * errata.
9496 		 */
9497 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9498 			return 0;
9499 
9500 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9501 			   MAC_MODE_PORT_INT_LPBACK;
9502 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9503 			mac_mode |= MAC_MODE_LINK_POLARITY;
9504 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9505 			mac_mode |= MAC_MODE_PORT_MODE_MII;
9506 		else
9507 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
9508 		tw32(MAC_MODE, mac_mode);
9509 	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
9510 		u32 val;
9511 
9512 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9513 			u32 phytest;
9514 
9515 			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9516 				u32 phy;
9517 
9518 				tg3_writephy(tp, MII_TG3_EPHY_TEST,
9519 					     phytest | MII_TG3_EPHY_SHADOW_EN);
9520 				if (!tg3_readphy(tp, 0x1b, &phy))
9521 					tg3_writephy(tp, 0x1b, phy & ~0x20);
9522 				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9523 			}
9524 			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9525 		} else
9526 			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9527 
9528 		tg3_phy_toggle_automdix(tp, 0);
9529 
9530 		tg3_writephy(tp, MII_BMCR, val);
9531 		udelay(40);
9532 
9533 		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9534 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9535 			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9536 			mac_mode |= MAC_MODE_PORT_MODE_MII;
9537 		} else
9538 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
9539 
9540 		/* reset to prevent losing 1st rx packet intermittently */
9541 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9542 			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9543 			udelay(10);
9544 			tw32_f(MAC_RX_MODE, tp->rx_mode);
9545 		}
9546 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9547 			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9548 				mac_mode &= ~MAC_MODE_LINK_POLARITY;
9549 			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9550 				mac_mode |= MAC_MODE_LINK_POLARITY;
9551 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
9552 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9553 		}
9554 		tw32(MAC_MODE, mac_mode);
9555 	}
9556 	else
9557 		return -EINVAL;
9558 
9559 	err = -EIO;
9560 
9561 	tx_len = 1514;
9562 	skb = netdev_alloc_skb(tp->dev, tx_len);
9563 	if (!skb)
9564 		return -ENOMEM;
9565 
9566 	tx_data = skb_put(skb, tx_len);
9567 	memcpy(tx_data, tp->dev->dev_addr, 6);
9568 	memset(tx_data + 6, 0x0, 8);
9569 
9570 	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9571 
9572 	for (i = 14; i < tx_len; i++)
9573 		tx_data[i] = (u8) (i & 0xff);
9574 
9575 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9576 
9577 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9578 	     HOSTCC_MODE_NOW);
9579 
9580 	udelay(10);
9581 
9582 	rx_start_idx = tp->hw_status->idx[0].rx_producer;
9583 
9584 	num_pkts = 0;
9585 
9586 	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9587 
9588 	tp->tx_prod++;
9589 	num_pkts++;
9590 
9591 	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9592 		     tp->tx_prod);
9593 	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9594 
9595 	udelay(10);
9596 
9597 	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9598 	for (i = 0; i < 25; i++) {
9599 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9600 		       HOSTCC_MODE_NOW);
9601 
9602 		udelay(10);
9603 
9604 		tx_idx = tp->hw_status->idx[0].tx_consumer;
9605 		rx_idx = tp->hw_status->idx[0].rx_producer;
9606 		if ((tx_idx == tp->tx_prod) &&
9607 		    (rx_idx == (rx_start_idx + num_pkts)))
9608 			break;
9609 	}
9610 
9611 	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9612 	dev_kfree_skb(skb);
9613 
9614 	if (tx_idx != tp->tx_prod)
9615 		goto out;
9616 
9617 	if (rx_idx != rx_start_idx + num_pkts)
9618 		goto out;
9619 
9620 	desc = &tp->rx_rcb[rx_start_idx];
9621 	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9622 	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9623 	if (opaque_key != RXD_OPAQUE_RING_STD)
9624 		goto out;
9625 
9626 	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9627 	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9628 		goto out;
9629 
9630 	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9631 	if (rx_len != tx_len)
9632 		goto out;
9633 
9634 	rx_skb = tp->rx_std_buffers[desc_idx].skb;
9635 
9636 	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9637 	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9638 
9639 	for (i = 14; i < tx_len; i++) {
9640 		if (*(rx_skb->data + i) != (u8) (i & 0xff))
9641 			goto out;
9642 	}
9643 	err = 0;
9644 
9645 	/* tg3_free_rings will unmap and free the rx_skb */
9646 out:
9647 	return err;
9648 }
9649 
9650 #define TG3_MAC_LOOPBACK_FAILED		1
9651 #define TG3_PHY_LOOPBACK_FAILED		2
9652 #define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
9653 					 TG3_PHY_LOOPBACK_FAILED)
9654 
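/* Run the MAC loopback test and, when this driver drives the PHY
 * directly (not SERDES, not phylib), the PHY loopback test as well.
 * CPMU-equipped chips must have link-aware power management disabled
 * for the duration of the tests.
 */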
9655 static int tg3_test_loopback(struct tg3 *tp)
9656 {
9657 	int err = 0;
9658 	u32 cpmuctrl = 0;
9659 
9660 	if (!netif_running(tp->dev))
9661 		return TG3_LOOPBACK_FAILED;
9662 
9663 	err = tg3_reset_hw(tp, 1);
9664 	if (err)
9665 		return TG3_LOOPBACK_FAILED;
9666 
9667 	/* Turn off gphy autopowerdown. */
9668 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9669 		tg3_phy_toggle_apd(tp, false);
9670 
9671 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9672 		int i;
9673 		u32 status;
9674 
9675 		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9676 
9677 		/* Wait for up to 40 microseconds to acquire lock. */
9678 		for (i = 0; i < 4; i++) {
9679 			status = tr32(TG3_CPMU_MUTEX_GNT);
9680 			if (status == CPMU_MUTEX_GNT_DRIVER)
9681 				break;
9682 			udelay(10);
9683 		}
9684 
9685 		if (status != CPMU_MUTEX_GNT_DRIVER)
9686 			return TG3_LOOPBACK_FAILED;
9687 
9688 		/* Turn off link-based power management. */
9689 		cpmuctrl = tr32(TG3_CPMU_CTRL);
9690 		tw32(TG3_CPMU_CTRL,
9691 		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9692 				  CPMU_CTRL_LINK_AWARE_MODE));
9693 	}
9694 
9695 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9696 		err |= TG3_MAC_LOOPBACK_FAILED;
9697 
9698 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9699 		tw32(TG3_CPMU_CTRL, cpmuctrl);
9700 
9701 		/* Release the mutex */
9702 		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9703 	}
9704 
9705 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9706 	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9707 		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9708 			err |= TG3_PHY_LOOPBACK_FAILED;
9709 	}
9710 
9711 	/* Re-enable gphy autopowerdown. */
9712 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9713 		tg3_phy_toggle_apd(tp, true);
9714 
9715 	return err;
9716 }
9717 
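/* ethtool ->self_test handler.  The NVRAM and link tests always run;
 * the register, memory, loopback and interrupt tests are offline-only
 * and bracketed by a chip halt and restart.
 */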
9718 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9719 			  u64 *data)
9720 {
9721 	struct tg3 *tp = netdev_priv(dev);
9722 
9723 	if (tp->link_config.phy_is_low_power)
9724 		tg3_set_power_state(tp, PCI_D0);
9725 
9726 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9727 
9728 	if (tg3_test_nvram(tp) != 0) {
9729 		etest->flags |= ETH_TEST_FL_FAILED;
9730 		data[0] = 1;
9731 	}
9732 	if (tg3_test_link(tp) != 0) {
9733 		etest->flags |= ETH_TEST_FL_FAILED;
9734 		data[1] = 1;
9735 	}
9736 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
9737 		int err, err2 = 0, irq_sync = 0;
9738 
9739 		if (netif_running(dev)) {
9740 			tg3_phy_stop(tp);
9741 			tg3_netif_stop(tp);
9742 			irq_sync = 1;
9743 		}
9744 
9745 		tg3_full_lock(tp, irq_sync);
9746 
9747 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9748 		err = tg3_nvram_lock(tp);
9749 		tg3_halt_cpu(tp, RX_CPU_BASE);
9750 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9751 			tg3_halt_cpu(tp, TX_CPU_BASE);
9752 		if (!err)
9753 			tg3_nvram_unlock(tp);
9754 
9755 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9756 			tg3_phy_reset(tp);
9757 
9758 		if (tg3_test_registers(tp) != 0) {
9759 			etest->flags |= ETH_TEST_FL_FAILED;
9760 			data[2] = 1;
9761 		}
9762 		if (tg3_test_memory(tp) != 0) {
9763 			etest->flags |= ETH_TEST_FL_FAILED;
9764 			data[3] = 1;
9765 		}
9766 		if ((data[4] = tg3_test_loopback(tp)) != 0)
9767 			etest->flags |= ETH_TEST_FL_FAILED;
9768 
9769 		tg3_full_unlock(tp);
9770 
9771 		if (tg3_test_interrupt(tp) != 0) {
9772 			etest->flags |= ETH_TEST_FL_FAILED;
9773 			data[5] = 1;
9774 		}
9775 
9776 		tg3_full_lock(tp, 0);
9777 
9778 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9779 		if (netif_running(dev)) {
9780 			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9781 			err2 = tg3_restart_hw(tp, 1);
9782 			if (!err2)
9783 				tg3_netif_start(tp);
9784 		}
9785 
9786 		tg3_full_unlock(tp);
9787 
9788 		if (irq_sync && !err2)
9789 			tg3_phy_start(tp);
9790 	}
9791 	if (tp->link_config.phy_is_low_power)
9792 		tg3_set_power_state(tp, PCI_D3hot);
9793 
9794 }
9795 
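/* MII ioctl handler (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG).  Requests
 * are forwarded to phylib when it owns the PHY, and the register
 * accesses are rejected on SERDES parts, which have no MII PHY.
 */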
9796 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9797 {
9798 	struct mii_ioctl_data *data = if_mii(ifr);
9799 	struct tg3 *tp = netdev_priv(dev);
9800 	int err;
9801 
9802 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9803 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9804 			return -EAGAIN;
9805 		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
9806 	}
9807 
9808 	switch (cmd) {
9809 	case SIOCGMIIPHY:
9810 		data->phy_id = PHY_ADDR;
9811 
9812 		/* fallthru */
9813 	case SIOCGMIIREG: {
9814 		u32 mii_regval;
9815 
9816 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9817 			break;			/* We have no PHY */
9818 
9819 		if (tp->link_config.phy_is_low_power)
9820 			return -EAGAIN;
9821 
9822 		spin_lock_bh(&tp->lock);
9823 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9824 		spin_unlock_bh(&tp->lock);
9825 
9826 		data->val_out = mii_regval;
9827 
9828 		return err;
9829 	}
9830 
9831 	case SIOCSMIIREG:
9832 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9833 			break;			/* We have no PHY */
9834 
9835 		if (!capable(CAP_NET_ADMIN))
9836 			return -EPERM;
9837 
9838 		if (tp->link_config.phy_is_low_power)
9839 			return -EAGAIN;
9840 
9841 		spin_lock_bh(&tp->lock);
9842 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9843 		spin_unlock_bh(&tp->lock);
9844 
9845 		return err;
9846 
9847 	default:
9848 		/* do nothing */
9849 		break;
9850 	}
9851 	return -EOPNOTSUPP;
9852 }
9853 
9854 #if TG3_VLAN_TAG_USED
9855 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9856 {
9857 	struct tg3 *tp = netdev_priv(dev);
9858 
9859 	if (netif_running(dev))
9860 		tg3_netif_stop(tp);
9861 
9862 	tg3_full_lock(tp, 0);
9863 
9864 	tp->vlgrp = grp;
9865 
9866 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9867 	__tg3_set_rx_mode(dev);
9868 
9869 	if (netif_running(dev))
9870 		tg3_netif_start(tp);
9871 
9872 	tg3_full_unlock(tp);
9873 }
9874 #endif
9875 
9876 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9877 {
9878 	struct tg3 *tp = netdev_priv(dev);
9879 
9880 	memcpy(ec, &tp->coal, sizeof(*ec));
9881 	return 0;
9882 }
9883 
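/* ethtool ->set_coalesce handler.  Requests are checked against the
 * chip limits (the irq-tick and stats-block timer limits are zero on
 * 5705 and newer parts) and rejected outright if they would disable RX
 * or TX interrupts entirely.
 */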
9884 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9885 {
9886 	struct tg3 *tp = netdev_priv(dev);
9887 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9888 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9889 
9890 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9891 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9892 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9893 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9894 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9895 	}
9896 
9897 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9898 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9899 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9900 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9901 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9902 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9903 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9904 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9905 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9906 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9907 		return -EINVAL;
9908 
9909 	/* No rx interrupts will be generated if both are zero */
9910 	if ((ec->rx_coalesce_usecs == 0) &&
9911 	    (ec->rx_max_coalesced_frames == 0))
9912 		return -EINVAL;
9913 
9914 	/* No tx interrupts will be generated if both are zero */
9915 	if ((ec->tx_coalesce_usecs == 0) &&
9916 	    (ec->tx_max_coalesced_frames == 0))
9917 		return -EINVAL;
9918 
9919 	/* Only copy relevant parameters, ignore all others. */
9920 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9921 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9922 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9923 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9924 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9925 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9926 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9927 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9928 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9929 
9930 	if (netif_running(dev)) {
9931 		tg3_full_lock(tp, 0);
9932 		__tg3_set_coalesce(tp, &tp->coal);
9933 		tg3_full_unlock(tp);
9934 	}
9935 	return 0;
9936 }
9937 
9938 static const struct ethtool_ops tg3_ethtool_ops = {
9939 	.get_settings		= tg3_get_settings,
9940 	.set_settings		= tg3_set_settings,
9941 	.get_drvinfo		= tg3_get_drvinfo,
9942 	.get_regs_len		= tg3_get_regs_len,
9943 	.get_regs		= tg3_get_regs,
9944 	.get_wol		= tg3_get_wol,
9945 	.set_wol		= tg3_set_wol,
9946 	.get_msglevel		= tg3_get_msglevel,
9947 	.set_msglevel		= tg3_set_msglevel,
9948 	.nway_reset		= tg3_nway_reset,
9949 	.get_link		= ethtool_op_get_link,
9950 	.get_eeprom_len		= tg3_get_eeprom_len,
9951 	.get_eeprom		= tg3_get_eeprom,
9952 	.set_eeprom		= tg3_set_eeprom,
9953 	.get_ringparam		= tg3_get_ringparam,
9954 	.set_ringparam		= tg3_set_ringparam,
9955 	.get_pauseparam		= tg3_get_pauseparam,
9956 	.set_pauseparam		= tg3_set_pauseparam,
9957 	.get_rx_csum		= tg3_get_rx_csum,
9958 	.set_rx_csum		= tg3_set_rx_csum,
9959 	.set_tx_csum		= tg3_set_tx_csum,
9960 	.set_sg			= ethtool_op_set_sg,
9961 	.set_tso		= tg3_set_tso,
9962 	.self_test		= tg3_self_test,
9963 	.get_strings		= tg3_get_strings,
9964 	.phys_id		= tg3_phys_id,
9965 	.get_ethtool_stats	= tg3_get_ethtool_stats,
9966 	.get_coalesce		= tg3_get_coalesce,
9967 	.set_coalesce		= tg3_set_coalesce,
9968 	.get_sset_count		= tg3_get_sset_count,
9969 };
9970 
9971 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9972 {
9973 	u32 cursize, val, magic;
9974 
9975 	tp->nvram_size = EEPROM_CHIP_SIZE;
9976 
9977 	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9978 		return;
9979 
9980 	if ((magic != TG3_EEPROM_MAGIC) &&
9981 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9982 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9983 		return;
9984 
9985 	/*
9986 	 * Size the chip by reading offsets at increasing powers of two.
9987 	 * When we encounter our validation signature, we know the addressing
9988 	 * has wrapped around, and thus have our chip size.
9989 	 */
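	/* For example, on a 512-byte EEPROM the address wraps at 0x200, so
	 * the magic word written at offset 0 is read back there and the
	 * loop stops with cursize == 0x200.  (Illustrative, assuming the
	 * wrap-around behavior described above.)
	 */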
9990 	cursize = 0x10;
9991 
9992 	while (cursize < tp->nvram_size) {
9993 		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9994 			return;
9995 
9996 		if (val == magic)
9997 			break;
9998 
9999 		cursize <<= 1;
10000 	}
10001 
10002 	tp->nvram_size = cursize;
10003 }
10004 
10005 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10006 {
10007 	u32 val;
10008 
10009 	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10010 		return;
10011 
10012 	/* Selfboot format */
10013 	if (val != TG3_EEPROM_MAGIC) {
10014 		tg3_get_eeprom_size(tp);
10015 		return;
10016 	}
10017 
10018 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10019 		if (val != 0) {
10020 			tp->nvram_size = (val >> 16) * 1024;
10021 			return;
10022 		}
10023 	}
10024 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10025 }
10026 
10027 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10028 {
10029 	u32 nvcfg1;
10030 
10031 	nvcfg1 = tr32(NVRAM_CFG1);
10032 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10033 		tp->tg3_flags2 |= TG3_FLG2_FLASH;
10034 	}
10035 	else {
10036 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10037 		tw32(NVRAM_CFG1, nvcfg1);
10038 	}
10039 
10040 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10041 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10042 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10043 			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10044 				tp->nvram_jedecnum = JEDEC_ATMEL;
10045 				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10046 				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10047 				break;
10048 			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10049 				tp->nvram_jedecnum = JEDEC_ATMEL;
10050 				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10051 				break;
10052 			case FLASH_VENDOR_ATMEL_EEPROM:
10053 				tp->nvram_jedecnum = JEDEC_ATMEL;
10054 				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10055 				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10056 				break;
10057 			case FLASH_VENDOR_ST:
10058 				tp->nvram_jedecnum = JEDEC_ST;
10059 				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10060 				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10061 				break;
10062 			case FLASH_VENDOR_SAIFUN:
10063 				tp->nvram_jedecnum = JEDEC_SAIFUN;
10064 				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10065 				break;
10066 			case FLASH_VENDOR_SST_SMALL:
10067 			case FLASH_VENDOR_SST_LARGE:
10068 				tp->nvram_jedecnum = JEDEC_SST;
10069 				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10070 				break;
10071 		}
10072 	}
10073 	else {
10074 		tp->nvram_jedecnum = JEDEC_ATMEL;
10075 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10076 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10077 	}
10078 }
10079 
10080 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10081 {
10082 	u32 nvcfg1;
10083 
10084 	nvcfg1 = tr32(NVRAM_CFG1);
10085 
10086 	/* NVRAM protection for TPM */
10087 	if (nvcfg1 & (1 << 27))
10088 		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10089 
10090 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10091 		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10092 		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10093 			tp->nvram_jedecnum = JEDEC_ATMEL;
10094 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10095 			break;
10096 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10097 			tp->nvram_jedecnum = JEDEC_ATMEL;
10098 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10099 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10100 			break;
10101 		case FLASH_5752VENDOR_ST_M45PE10:
10102 		case FLASH_5752VENDOR_ST_M45PE20:
10103 		case FLASH_5752VENDOR_ST_M45PE40:
10104 			tp->nvram_jedecnum = JEDEC_ST;
10105 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10106 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10107 			break;
10108 	}
10109 
10110 	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10111 		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10112 			case FLASH_5752PAGE_SIZE_256:
10113 				tp->nvram_pagesize = 256;
10114 				break;
10115 			case FLASH_5752PAGE_SIZE_512:
10116 				tp->nvram_pagesize = 512;
10117 				break;
10118 			case FLASH_5752PAGE_SIZE_1K:
10119 				tp->nvram_pagesize = 1024;
10120 				break;
10121 			case FLASH_5752PAGE_SIZE_2K:
10122 				tp->nvram_pagesize = 2048;
10123 				break;
10124 			case FLASH_5752PAGE_SIZE_4K:
10125 				tp->nvram_pagesize = 4096;
10126 				break;
10127 			case FLASH_5752PAGE_SIZE_264:
10128 				tp->nvram_pagesize = 264;
10129 				break;
10130 		}
10131 	}
10132 	else {
10133 		/* For eeprom, set pagesize to maximum eeprom size */
10134 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10135 
10136 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10137 		tw32(NVRAM_CFG1, nvcfg1);
10138 	}
10139 }
10140 
10141 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10142 {
10143 	u32 nvcfg1, protect = 0;
10144 
10145 	nvcfg1 = tr32(NVRAM_CFG1);
10146 
10147 	/* NVRAM protection for TPM */
10148 	if (nvcfg1 & (1 << 27)) {
10149 		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10150 		protect = 1;
10151 	}
10152 
10153 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10154 	switch (nvcfg1) {
10155 		case FLASH_5755VENDOR_ATMEL_FLASH_1:
10156 		case FLASH_5755VENDOR_ATMEL_FLASH_2:
10157 		case FLASH_5755VENDOR_ATMEL_FLASH_3:
10158 		case FLASH_5755VENDOR_ATMEL_FLASH_5:
10159 			tp->nvram_jedecnum = JEDEC_ATMEL;
10160 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10161 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10162 			tp->nvram_pagesize = 264;
10163 			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10164 			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10165 				tp->nvram_size = (protect ? 0x3e200 :
10166 						  TG3_NVRAM_SIZE_512KB);
10167 			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10168 				tp->nvram_size = (protect ? 0x1f200 :
10169 						  TG3_NVRAM_SIZE_256KB);
10170 			else
10171 				tp->nvram_size = (protect ? 0x1f200 :
10172 						  TG3_NVRAM_SIZE_128KB);
10173 			break;
10174 		case FLASH_5752VENDOR_ST_M45PE10:
10175 		case FLASH_5752VENDOR_ST_M45PE20:
10176 		case FLASH_5752VENDOR_ST_M45PE40:
10177 			tp->nvram_jedecnum = JEDEC_ST;
10178 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10179 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10180 			tp->nvram_pagesize = 256;
10181 			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10182 				tp->nvram_size = (protect ?
10183 						  TG3_NVRAM_SIZE_64KB :
10184 						  TG3_NVRAM_SIZE_128KB);
10185 			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10186 				tp->nvram_size = (protect ?
10187 						  TG3_NVRAM_SIZE_64KB :
10188 						  TG3_NVRAM_SIZE_256KB);
10189 			else
10190 				tp->nvram_size = (protect ?
10191 						  TG3_NVRAM_SIZE_128KB :
10192 						  TG3_NVRAM_SIZE_512KB);
10193 			break;
10194 	}
10195 }
10196 
10197 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10198 {
10199 	u32 nvcfg1;
10200 
10201 	nvcfg1 = tr32(NVRAM_CFG1);
10202 
10203 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10204 		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10205 		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10206 		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10207 		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10208 			tp->nvram_jedecnum = JEDEC_ATMEL;
10209 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10210 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10211 
10212 			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10213 			tw32(NVRAM_CFG1, nvcfg1);
10214 			break;
10215 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10216 		case FLASH_5755VENDOR_ATMEL_FLASH_1:
10217 		case FLASH_5755VENDOR_ATMEL_FLASH_2:
10218 		case FLASH_5755VENDOR_ATMEL_FLASH_3:
10219 			tp->nvram_jedecnum = JEDEC_ATMEL;
10220 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10221 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10222 			tp->nvram_pagesize = 264;
10223 			break;
10224 		case FLASH_5752VENDOR_ST_M45PE10:
10225 		case FLASH_5752VENDOR_ST_M45PE20:
10226 		case FLASH_5752VENDOR_ST_M45PE40:
10227 			tp->nvram_jedecnum = JEDEC_ST;
10228 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10229 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10230 			tp->nvram_pagesize = 256;
10231 			break;
10232 	}
10233 }
10234 
10235 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10236 {
10237 	u32 nvcfg1, protect = 0;
10238 
10239 	nvcfg1 = tr32(NVRAM_CFG1);
10240 
10241 	/* NVRAM protection for TPM */
10242 	if (nvcfg1 & (1 << 27)) {
10243 		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10244 		protect = 1;
10245 	}
10246 
10247 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10248 	switch (nvcfg1) {
10249 		case FLASH_5761VENDOR_ATMEL_ADB021D:
10250 		case FLASH_5761VENDOR_ATMEL_ADB041D:
10251 		case FLASH_5761VENDOR_ATMEL_ADB081D:
10252 		case FLASH_5761VENDOR_ATMEL_ADB161D:
10253 		case FLASH_5761VENDOR_ATMEL_MDB021D:
10254 		case FLASH_5761VENDOR_ATMEL_MDB041D:
10255 		case FLASH_5761VENDOR_ATMEL_MDB081D:
10256 		case FLASH_5761VENDOR_ATMEL_MDB161D:
10257 			tp->nvram_jedecnum = JEDEC_ATMEL;
10258 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10259 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10260 			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10261 			tp->nvram_pagesize = 256;
10262 			break;
10263 		case FLASH_5761VENDOR_ST_A_M45PE20:
10264 		case FLASH_5761VENDOR_ST_A_M45PE40:
10265 		case FLASH_5761VENDOR_ST_A_M45PE80:
10266 		case FLASH_5761VENDOR_ST_A_M45PE16:
10267 		case FLASH_5761VENDOR_ST_M_M45PE20:
10268 		case FLASH_5761VENDOR_ST_M_M45PE40:
10269 		case FLASH_5761VENDOR_ST_M_M45PE80:
10270 		case FLASH_5761VENDOR_ST_M_M45PE16:
10271 			tp->nvram_jedecnum = JEDEC_ST;
10272 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10273 			tp->tg3_flags2 |= TG3_FLG2_FLASH;
10274 			tp->nvram_pagesize = 256;
10275 			break;
10276 	}
10277 
10278 	if (protect) {
10279 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10280 	} else {
10281 		switch (nvcfg1) {
10282 			case FLASH_5761VENDOR_ATMEL_ADB161D:
10283 			case FLASH_5761VENDOR_ATMEL_MDB161D:
10284 			case FLASH_5761VENDOR_ST_A_M45PE16:
10285 			case FLASH_5761VENDOR_ST_M_M45PE16:
10286 				tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10287 				break;
10288 			case FLASH_5761VENDOR_ATMEL_ADB081D:
10289 			case FLASH_5761VENDOR_ATMEL_MDB081D:
10290 			case FLASH_5761VENDOR_ST_A_M45PE80:
10291 			case FLASH_5761VENDOR_ST_M_M45PE80:
10292 				tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10293 				break;
10294 			case FLASH_5761VENDOR_ATMEL_ADB041D:
10295 			case FLASH_5761VENDOR_ATMEL_MDB041D:
10296 			case FLASH_5761VENDOR_ST_A_M45PE40:
10297 			case FLASH_5761VENDOR_ST_M_M45PE40:
10298 				tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10299 				break;
10300 			case FLASH_5761VENDOR_ATMEL_ADB021D:
10301 			case FLASH_5761VENDOR_ATMEL_MDB021D:
10302 			case FLASH_5761VENDOR_ST_A_M45PE20:
10303 			case FLASH_5761VENDOR_ST_M_M45PE20:
10304 				tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10305 				break;
10306 		}
10307 	}
10308 }
10309 
10310 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10311 {
10312 	tp->nvram_jedecnum = JEDEC_ATMEL;
10313 	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10314 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10315 }
10316 
10317 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10318 {
10319 	u32 nvcfg1;
10320 
10321 	nvcfg1 = tr32(NVRAM_CFG1);
10322 
10323 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10324 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10325 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10326 		tp->nvram_jedecnum = JEDEC_ATMEL;
10327 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10328 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10329 
10330 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10331 		tw32(NVRAM_CFG1, nvcfg1);
10332 		return;
10333 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10334 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10335 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10336 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10337 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10338 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10339 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10340 		tp->nvram_jedecnum = JEDEC_ATMEL;
10341 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10342 		tp->tg3_flags2 |= TG3_FLG2_FLASH;
10343 
10344 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10345 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10346 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10347 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10348 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10349 			break;
10350 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10351 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10352 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10353 			break;
10354 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10355 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10356 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10357 			break;
10358 		}
10359 		break;
10360 	case FLASH_5752VENDOR_ST_M45PE10:
10361 	case FLASH_5752VENDOR_ST_M45PE20:
10362 	case FLASH_5752VENDOR_ST_M45PE40:
10363 		tp->nvram_jedecnum = JEDEC_ST;
10364 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10365 		tp->tg3_flags2 |= TG3_FLG2_FLASH;
10366 
10367 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10368 		case FLASH_5752VENDOR_ST_M45PE10:
10369 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10370 			break;
10371 		case FLASH_5752VENDOR_ST_M45PE20:
10372 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10373 			break;
10374 		case FLASH_5752VENDOR_ST_M45PE40:
10375 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10376 			break;
10377 		}
10378 		break;
10379 	default:
10380 		return;
10381 	}
10382 
10383 	switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10384 	case FLASH_5752PAGE_SIZE_256:
10385 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10386 		tp->nvram_pagesize = 256;
10387 		break;
10388 	case FLASH_5752PAGE_SIZE_512:
10389 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10390 		tp->nvram_pagesize = 512;
10391 		break;
10392 	case FLASH_5752PAGE_SIZE_1K:
10393 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10394 		tp->nvram_pagesize = 1024;
10395 		break;
10396 	case FLASH_5752PAGE_SIZE_2K:
10397 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10398 		tp->nvram_pagesize = 2048;
10399 		break;
10400 	case FLASH_5752PAGE_SIZE_4K:
10401 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10402 		tp->nvram_pagesize = 4096;
10403 		break;
10404 	case FLASH_5752PAGE_SIZE_264:
10405 		tp->nvram_pagesize = 264;
10406 		break;
10407 	case FLASH_5752PAGE_SIZE_528:
10408 		tp->nvram_pagesize = 528;
10409 		break;
10410 	}
10411 }
10412 
10413 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10414 static void __devinit tg3_nvram_init(struct tg3 *tp)
10415 {
10416 	tw32_f(GRC_EEPROM_ADDR,
10417 	     (EEPROM_ADDR_FSM_RESET |
10418 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
10419 	       EEPROM_ADDR_CLKPERD_SHIFT)));
10420 
10421 	msleep(1);
10422 
10423 	/* Enable seeprom accesses. */
10424 	tw32_f(GRC_LOCAL_CTRL,
10425 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10426 	udelay(100);
10427 
10428 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10429 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10430 		tp->tg3_flags |= TG3_FLAG_NVRAM;
10431 
10432 		if (tg3_nvram_lock(tp)) {
10433 			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10434 			       "tg3_nvram_init failed.\n", tp->dev->name);
10435 			return;
10436 		}
10437 		tg3_enable_nvram_access(tp);
10438 
10439 		tp->nvram_size = 0;
10440 
10441 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10442 			tg3_get_5752_nvram_info(tp);
10443 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10444 			tg3_get_5755_nvram_info(tp);
10445 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10446 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10447 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10448 			tg3_get_5787_nvram_info(tp);
10449 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10450 			tg3_get_5761_nvram_info(tp);
10451 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10452 			tg3_get_5906_nvram_info(tp);
10453 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10454 			tg3_get_57780_nvram_info(tp);
10455 		else
10456 			tg3_get_nvram_info(tp);
10457 
10458 		if (tp->nvram_size == 0)
10459 			tg3_get_nvram_size(tp);
10460 
10461 		tg3_disable_nvram_access(tp);
10462 		tg3_nvram_unlock(tp);
10463 
10464 	} else {
10465 		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10466 
10467 		tg3_get_eeprom_size(tp);
10468 	}
10469 }
10470 
10471 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10472 					u32 offset, u32 *val)
10473 {
10474 	u32 tmp;
10475 	int i;
10476 
10477 	if (offset > EEPROM_ADDR_ADDR_MASK ||
10478 	    (offset % 4) != 0)
10479 		return -EINVAL;
10480 
10481 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10482 					EEPROM_ADDR_DEVID_MASK |
10483 					EEPROM_ADDR_READ);
10484 	tw32(GRC_EEPROM_ADDR,
10485 	     tmp |
10486 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
10487 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10488 	      EEPROM_ADDR_ADDR_MASK) |
10489 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
10490 
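	/* Poll for completion; 1000 passes of msleep(1) bounds the wait
	 * at roughly one second.
	 */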
10491 	for (i = 0; i < 1000; i++) {
10492 		tmp = tr32(GRC_EEPROM_ADDR);
10493 
10494 		if (tmp & EEPROM_ADDR_COMPLETE)
10495 			break;
10496 		msleep(1);
10497 	}
10498 	if (!(tmp & EEPROM_ADDR_COMPLETE))
10499 		return -EBUSY;
10500 
10501 	*val = tr32(GRC_EEPROM_DATA);
10502 	return 0;
10503 }
10504 
10505 #define NVRAM_CMD_TIMEOUT 10000
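/* tg3_nvram_exec_cmd() below polls in 10 usec steps, so this bounds the
 * wait at roughly 100 ms.
 */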
10506 
10507 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10508 {
10509 	int i;
10510 
10511 	tw32(NVRAM_CMD, nvram_cmd);
10512 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10513 		udelay(10);
10514 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10515 			udelay(10);
10516 			break;
10517 		}
10518 	}
10519 	if (i == NVRAM_CMD_TIMEOUT) {
10520 		return -EBUSY;
10521 	}
10522 	return 0;
10523 }
10524 
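/* Translate a linear NVRAM offset into the paged addressing used by
 * buffered Atmel AT45DB0X1B-style flashes: the page number is placed
 * above ATMEL_AT45DB0X1B_PAGE_POS and the in-page byte offset below it.
 * For example, with 264-byte pages and a page position of 9, linear
 * offset 1000 is page 3, byte 208, giving (3 << 9) + 208 = 1744.
 */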
10525 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10526 {
10527 	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10528 	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10529 	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10530 	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10531 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
10532 
10533 		addr = ((addr / tp->nvram_pagesize) <<
10534 			ATMEL_AT45DB0X1B_PAGE_POS) +
10535 		       (addr % tp->nvram_pagesize);
10536 
10537 	return addr;
10538 }
10539 
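/* Inverse of tg3_nvram_phys_addr(): fold a paged physical address back
 * into a linear offset.  Continuing the example above, (3 << 9) + 208 =
 * 1744 maps back to 3 * 264 + 208 = 1000.
 */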
10540 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10541 {
10542 	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10543 	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10544 	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10545 	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10546 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
10547 
10548 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10549 			tp->nvram_pagesize) +
10550 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10551 
10552 	return addr;
10553 }
10554 
10555 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10556 {
10557 	int ret;
10558 
10559 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10560 		return tg3_nvram_read_using_eeprom(tp, offset, val);
10561 
10562 	offset = tg3_nvram_phys_addr(tp, offset);
10563 
10564 	if (offset > NVRAM_ADDR_MSK)
10565 		return -EINVAL;
10566 
10567 	ret = tg3_nvram_lock(tp);
10568 	if (ret)
10569 		return ret;
10570 
10571 	tg3_enable_nvram_access(tp);
10572 
10573 	tw32(NVRAM_ADDR, offset);
10574 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10575 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10576 
10577 	if (ret == 0)
10578 		*val = swab32(tr32(NVRAM_RDDATA));
10579 
10580 	tg3_disable_nvram_access(tp);
10581 
10582 	tg3_nvram_unlock(tp);
10583 
10584 	return ret;
10585 }
10586 
10587 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10588 {
10589 	u32 v;
10590 	int res = tg3_nvram_read(tp, offset, &v);
10591 	if (!res)
10592 		*val = cpu_to_le32(v);
10593 	return res;
10594 }
10595 
10596 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10597 {
10598 	int err;
10599 	u32 tmp;
10600 
10601 	err = tg3_nvram_read(tp, offset, &tmp);
10602 	*val = swab32(tmp);
10603 	return err;
10604 }
10605 
10606 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10607 				    u32 offset, u32 len, u8 *buf)
10608 {
10609 	int i, j, rc = 0;
10610 	u32 val;
10611 
10612 	for (i = 0; i < len; i += 4) {
10613 		u32 addr;
10614 		__le32 data;
10615 
10616 		addr = offset + i;
10617 
10618 		memcpy(&data, buf + i, 4);
10619 
10620 		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10621 
10622 		val = tr32(GRC_EEPROM_ADDR);
10623 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10624 
10625 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10626 			EEPROM_ADDR_READ);
10627 		tw32(GRC_EEPROM_ADDR, val |
10628 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
10629 			(addr & EEPROM_ADDR_ADDR_MASK) |
10630 			EEPROM_ADDR_START |
10631 			EEPROM_ADDR_WRITE);
10632 
10633 		for (j = 0; j < 1000; j++) {
10634 			val = tr32(GRC_EEPROM_ADDR);
10635 
10636 			if (val & EEPROM_ADDR_COMPLETE)
10637 				break;
10638 			msleep(1);
10639 		}
10640 		if (!(val & EEPROM_ADDR_COMPLETE)) {
10641 			rc = -EBUSY;
10642 			break;
10643 		}
10644 	}
10645 
10646 	return rc;
10647 }
10648 
10649 /* offset and length are dword aligned */
10650 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10651 		u8 *buf)
10652 {
10653 	int ret = 0;
10654 	u32 pagesize = tp->nvram_pagesize;
10655 	u32 pagemask = pagesize - 1;
10656 	u32 nvram_cmd;
10657 	u8 *tmp;
10658 
10659 	tmp = kmalloc(pagesize, GFP_KERNEL);
10660 	if (tmp == NULL)
10661 		return -ENOMEM;
10662 
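	/* Unbuffered flash parts require a read-modify-write cycle: read
	 * the enclosing page into a bounce buffer, merge in the new data,
	 * erase the page, then program it back one word at a time.
	 */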
10663 	while (len) {
10664 		int j;
10665 		u32 phy_addr, page_off, size;
10666 
10667 		phy_addr = offset & ~pagemask;
10668 
10669 		for (j = 0; j < pagesize; j += 4) {
10670 			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10671 						(__le32 *) (tmp + j))))
10672 				break;
10673 		}
10674 		if (ret)
10675 			break;
10676 
10677 		page_off = offset & pagemask;
10678 		size = pagesize;
10679 		if (len < size)
10680 			size = len;
10681 
10682 		len -= size;
10683 
10684 		memcpy(tmp + page_off, buf, size);
10685 
10686 		offset = offset + (pagesize - page_off);
10687 
10688 		tg3_enable_nvram_access(tp);
10689 
10690 		/*
10691 		 * Before we can erase the flash page, we need
10692 		 * to issue a special "write enable" command.
10693 		 */
10694 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10695 
10696 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10697 			break;
10698 
10699 		/* Erase the target page */
10700 		tw32(NVRAM_ADDR, phy_addr);
10701 
10702 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10703 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10704 
10705 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10706 			break;
10707 
10708 		/* Issue another write enable to start the write. */
10709 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10710 
10711 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10712 			break;
10713 
10714 		for (j = 0; j < pagesize; j += 4) {
10715 			__be32 data;
10716 
10717 			data = *((__be32 *) (tmp + j));
10718 			/* swab32(le32_to_cpu(data)), actually */
10719 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
10720 
10721 			tw32(NVRAM_ADDR, phy_addr + j);
10722 
10723 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10724 				NVRAM_CMD_WR;
10725 
10726 			if (j == 0)
10727 				nvram_cmd |= NVRAM_CMD_FIRST;
10728 			else if (j == (pagesize - 4))
10729 				nvram_cmd |= NVRAM_CMD_LAST;
10730 
10731 			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10732 				break;
10733 		}
10734 		if (ret)
10735 			break;
10736 	}
10737 
10738 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10739 	tg3_nvram_exec_cmd(tp, nvram_cmd);
10740 
10741 	kfree(tmp);
10742 
10743 	return ret;
10744 }
10745 
10746 /* offset and length are dword aligned */
10747 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10748 		u8 *buf)
10749 {
10750 	int i, ret = 0;
10751 
10752 	for (i = 0; i < len; i += 4, offset += 4) {
10753 		u32 page_off, phy_addr, nvram_cmd;
10754 		__be32 data;
10755 
10756 		memcpy(&data, buf + i, 4);
10757 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
10758 
10759 		page_off = offset % tp->nvram_pagesize;
10760 
10761 		phy_addr = tg3_nvram_phys_addr(tp, offset);
10762 
10763 		tw32(NVRAM_ADDR, phy_addr);
10764 
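		/* Tag the first and last words of each flash page, and of
		 * the transfer as a whole, so the controller knows when to
		 * begin and when to commit a page buffer.
		 */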
10765 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10766 
10767 		if ((page_off == 0) || (i == 0))
10768 			nvram_cmd |= NVRAM_CMD_FIRST;
10769 		if (page_off == (tp->nvram_pagesize - 4))
10770 			nvram_cmd |= NVRAM_CMD_LAST;
10771 
10772 		if (i == (len - 4))
10773 			nvram_cmd |= NVRAM_CMD_LAST;
10774 
10775 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10776 		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
10777 		    (tp->nvram_jedecnum == JEDEC_ST) &&
10778 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
10779 
10780 			if ((ret = tg3_nvram_exec_cmd(tp,
10781 				NVRAM_CMD_WREN | NVRAM_CMD_GO |
10782 				NVRAM_CMD_DONE)))
10783 
10784 				break;
10785 		}
10786 		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10787 			/* We always do complete word writes to eeprom. */
10788 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10789 		}
10790 
10791 		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10792 			break;
10793 	}
10794 	return ret;
10795 }
10796 
10797 /* offset and length are dword aligned */
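/* Top-level NVRAM write: drop the eeprom write-protect line for the
 * duration if necessary, then route to the eeprom, buffered-flash, or
 * unbuffered-flash writer.
 */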
10798 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10799 {
10800 	int ret;
10801 
10802 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10803 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10804 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
10805 		udelay(40);
10806 	}
10807 
10808 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10809 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10810 	}
10811 	else {
10812 		u32 grc_mode;
10813 
10814 		ret = tg3_nvram_lock(tp);
10815 		if (ret)
10816 			return ret;
10817 
10818 		tg3_enable_nvram_access(tp);
10819 		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10820 		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10821 			tw32(NVRAM_WRITE1, 0x406);
10822 
10823 		grc_mode = tr32(GRC_MODE);
10824 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10825 
10826 		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10827 			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10828 
10829 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
10830 				buf);
10831 		}
10832 		else {
10833 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10834 				buf);
10835 		}
10836 
10837 		grc_mode = tr32(GRC_MODE);
10838 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10839 
10840 		tg3_disable_nvram_access(tp);
10841 		tg3_nvram_unlock(tp);
10842 	}
10843 
10844 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10845 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10846 		udelay(40);
10847 	}
10848 
10849 	return ret;
10850 }
10851 
10852 struct subsys_tbl_ent {
10853 	u16 subsys_vendor, subsys_devid;
10854 	u32 phy_id;
10855 };
10856 
10857 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10858 	/* Broadcom boards. */
10859 	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10860 	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10861 	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10862 	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
10863 	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10864 	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10865 	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
10866 	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10867 	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10868 	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10869 	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10870 
10871 	/* 3com boards. */
10872 	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10873 	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10874 	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
10875 	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10876 	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10877 
10878 	/* DELL boards. */
10879 	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10880 	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10881 	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10882 	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10883 
10884 	/* Compaq boards. */
10885 	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10886 	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10887 	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
10888 	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10889 	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10890 
10891 	/* IBM boards. */
10892 	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10893 };
10894 
10895 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10896 {
10897 	int i;
10898 
10899 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10900 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
10901 		     tp->pdev->subsystem_vendor) &&
10902 		    (subsys_id_to_phy_id[i].subsys_devid ==
10903 		     tp->pdev->subsystem_device))
10904 			return &subsys_id_to_phy_id[i];
10905 	}
10906 	return NULL;
10907 }
10908 
10909 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10910 {
10911 	u32 val;
10912 	u16 pmcsr;
10913 
10914 	/* On some early chips the SRAM cannot be accessed in D3hot state,
10915 	 * so we need to make sure we're in D0.
10916 	 */
10917 	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10918 	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10919 	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10920 	msleep(1);
10921 
10922 	/* Make sure register accesses (indirect or otherwise)
10923 	 * will function correctly.
10924 	 */
10925 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10926 			       tp->misc_host_ctrl);
10927 
10928 	/* The memory arbiter has to be enabled in order for SRAM accesses
10929 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
10930 	 * sure it is enabled, but other entities such as system netboot
10931 	 * code might disable it.
10932 	 */
10933 	val = tr32(MEMARB_MODE);
10934 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10935 
10936 	tp->phy_id = PHY_ID_INVALID;
10937 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10938 
10939 	/* Assume an onboard device and WOL capable by default.  */
10940 	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10941 
10942 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10943 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10944 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10945 			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10946 		}
10947 		val = tr32(VCPU_CFGSHDW);
10948 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
10949 			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10950 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10951 		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
10952 			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10953 		goto done;
10954 	}
10955 
10956 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10957 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10958 		u32 nic_cfg, led_cfg;
10959 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
10960 		int eeprom_phy_serdes = 0;
10961 
10962 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10963 		tp->nic_sram_data_cfg = nic_cfg;
10964 
10965 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10966 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
10967 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10968 		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10969 		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10970 		    (ver > 0) && (ver < 0x100))
10971 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10972 
10973 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10974 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
10975 
10976 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10977 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10978 			eeprom_phy_serdes = 1;
10979 
10980 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10981 		if (nic_phy_id != 0) {
10982 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10983 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10984 
10985 			eeprom_phy_id  = (id1 >> 16) << 10;
10986 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
10987 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10988 		} else
10989 			eeprom_phy_id = 0;
10990 
10991 		tp->phy_id = eeprom_phy_id;
10992 		if (eeprom_phy_serdes) {
10993 			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10994 				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10995 			else
10996 				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10997 		}
10998 
10999 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11000 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11001 				    SHASTA_EXT_LED_MODE_MASK);
11002 		else
11003 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11004 
11005 		switch (led_cfg) {
11006 		default:
11007 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11008 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11009 			break;
11010 
11011 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11012 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11013 			break;
11014 
11015 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11016 			tp->led_ctrl = LED_CTRL_MODE_MAC;
11017 
11018 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
11019 			 * read on some older 5700/5701 bootcode.
11020 			 */
11021 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11022 			    ASIC_REV_5700 ||
11023 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
11024 			    ASIC_REV_5701)
11025 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11026 
11027 			break;
11028 
11029 		case SHASTA_EXT_LED_SHARED:
11030 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
11031 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11032 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11033 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11034 						 LED_CTRL_MODE_PHY_2);
11035 			break;
11036 
11037 		case SHASTA_EXT_LED_MAC:
11038 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11039 			break;
11040 
11041 		case SHASTA_EXT_LED_COMBO:
11042 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
11043 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11044 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11045 						 LED_CTRL_MODE_PHY_2);
11046 			break;
11047 
11048 		}
11049 
11050 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11051 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11052 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11053 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11054 
11055 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11056 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11057 
11058 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11059 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11060 			if ((tp->pdev->subsystem_vendor ==
11061 			     PCI_VENDOR_ID_ARIMA) &&
11062 			    (tp->pdev->subsystem_device == 0x205a ||
11063 			     tp->pdev->subsystem_device == 0x2063))
11064 				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11065 		} else {
11066 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11067 			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11068 		}
11069 
11070 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11071 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11072 			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11073 				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11074 		}
11075 
11076 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11077 			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11078 			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11079 
11080 		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11081 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11082 			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11083 
11084 		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11085 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11086 			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11087 
11088 		if (cfg2 & (1 << 17))
11089 			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11090 
11091 		/* serdes signal pre-emphasis in register 0x590 is set by
11092 		 * the bootcode if bit 18 is set */
11093 		if (cfg2 & (1 << 18))
11094 			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11095 
11096 		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11097 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11098 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11099 			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11100 
11101 		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11102 			u32 cfg3;
11103 
11104 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11105 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11106 				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11107 		}
11108 
11109 		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11110 			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11111 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11112 			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11113 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11114 			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11115 	}
11116 done:
11117 	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11118 	device_set_wakeup_enable(&tp->pdev->dev,
11119 				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11120 }
11121 
11122 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11123 {
11124 	int i;
11125 	u32 val;
11126 
11127 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11128 	tw32(OTP_CTRL, cmd);
11129 
11130 	/* Wait for up to 1 ms for command to execute. */
11131 	for (i = 0; i < 100; i++) {
11132 		val = tr32(OTP_STATUS);
11133 		if (val & OTP_STATUS_CMD_DONE)
11134 			break;
11135 		udelay(10);
11136 	}
11137 
11138 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11139 }
11140 
11141 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11142  * configuration is a 32-bit value that straddles the alignment boundary.
11143  * We do two 32-bit reads and then shift and merge the results.
11144  */
11145 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11146 {
11147 	u32 bhalf_otp, thalf_otp;
11148 
11149 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11150 
11151 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11152 		return 0;
11153 
11154 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11155 
11156 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11157 		return 0;
11158 
11159 	thalf_otp = tr32(OTP_READ_DATA);
11160 
11161 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11162 
11163 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11164 		return 0;
11165 
11166 	bhalf_otp = tr32(OTP_READ_DATA);
11167 
11168 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11169 }
11170 
11171 static int __devinit tg3_phy_probe(struct tg3 *tp)
11172 {
11173 	u32 hw_phy_id_1, hw_phy_id_2;
11174 	u32 hw_phy_id, hw_phy_id_masked;
11175 	int err;
11176 
11177 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11178 		return tg3_phy_init(tp);
11179 
11180 	/* Reading the PHY ID register can conflict with ASF
11181 	 * firmware access to the PHY hardware.
11182 	 */
11183 	err = 0;
11184 	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11185 	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11186 		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11187 	} else {
11188 		/* Now read the physical PHY_ID from the chip and verify
11189 		 * that it is sane.  If it doesn't look good, we fall back
11190 		 * to the hard-coded table-based PHY_ID or, failing
11191 		 * that, to the value found in the eeprom area.
11192 		 */
11193 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11194 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11195 
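		/* Pack the two MII ID words into the driver's internal
		 * PHY_ID format, mirroring the packing used by
		 * tg3_get_eeprom_hw_cfg().
		 */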
11196 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11197 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11198 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11199 
11200 		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11201 	}
11202 
11203 	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11204 		tp->phy_id = hw_phy_id;
11205 		if (hw_phy_id_masked == PHY_ID_BCM8002)
11206 			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11207 		else
11208 			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11209 	} else {
11210 		if (tp->phy_id != PHY_ID_INVALID) {
11211 			/* Do nothing, phy ID already set up in
11212 			 * tg3_get_eeprom_hw_cfg().
11213 			 */
11214 		} else {
11215 			struct subsys_tbl_ent *p;
11216 
11217 			/* No eeprom signature?  Try the hardcoded
11218 			 * subsys device table.
11219 			 */
11220 			p = lookup_by_subsys(tp);
11221 			if (!p)
11222 				return -ENODEV;
11223 
11224 			tp->phy_id = p->phy_id;
11225 			if (!tp->phy_id ||
11226 			    tp->phy_id == PHY_ID_BCM8002)
11227 				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11228 		}
11229 	}
11230 
11231 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11232 	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11233 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11234 		u32 bmsr, adv_reg, tg3_ctrl, mask;
11235 
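		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */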
11236 		tg3_readphy(tp, MII_BMSR, &bmsr);
11237 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11238 		    (bmsr & BMSR_LSTATUS))
11239 			goto skip_phy_reset;
11240 
11241 		err = tg3_phy_reset(tp);
11242 		if (err)
11243 			return err;
11244 
11245 		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11246 			   ADVERTISE_100HALF | ADVERTISE_100FULL |
11247 			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11248 		tg3_ctrl = 0;
11249 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11250 			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11251 				    MII_TG3_CTRL_ADV_1000_FULL);
11252 			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11253 			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11254 				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11255 					     MII_TG3_CTRL_ENABLE_AS_MASTER);
11256 		}
11257 
11258 		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11259 			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11260 			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11261 		if (!tg3_copper_is_advertising_all(tp, mask)) {
11262 			tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11263 
11264 			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11265 				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11266 
11267 			tg3_writephy(tp, MII_BMCR,
11268 				     BMCR_ANENABLE | BMCR_ANRESTART);
11269 		}
11270 		tg3_phy_set_wirespeed(tp);
11271 
11272 		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11273 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11274 			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11275 	}
11276 
11277 skip_phy_reset:
11278 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11279 		err = tg3_init_5401phy_dsp(tp);
11280 		if (err)
11281 			return err;
11282 	}
11283 
11284 	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11285 		err = tg3_init_5401phy_dsp(tp);
11286 	}
11287 
11288 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11289 		tp->link_config.advertising =
11290 			(ADVERTISED_1000baseT_Half |
11291 			 ADVERTISED_1000baseT_Full |
11292 			 ADVERTISED_Autoneg |
11293 			 ADVERTISED_FIBRE);
11294 	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11295 		tp->link_config.advertising &=
11296 			~(ADVERTISED_1000baseT_Half |
11297 			  ADVERTISED_1000baseT_Full);
11298 
11299 	return err;
11300 }
11301 
11302 static void __devinit tg3_read_partno(struct tg3 *tp)
11303 {
11304 	unsigned char vpd_data[256];
11305 	unsigned int i;
11306 	u32 magic;
11307 
11308 	if (tg3_nvram_read_swab(tp, 0x0, &magic))
11309 		goto out_not_found;
11310 
11311 	if (magic == TG3_EEPROM_MAGIC) {
11312 		for (i = 0; i < 256; i += 4) {
11313 			u32 tmp;
11314 
11315 			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11316 				goto out_not_found;
11317 
11318 			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11319 			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11320 			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11321 			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11322 		}
11323 	} else {
11324 		int vpd_cap;
11325 
11326 		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11327 		for (i = 0; i < 256; i += 4) {
11328 			u32 tmp, j = 0;
11329 			__le32 v;
11330 			u16 tmp16;
11331 
11332 			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11333 					      i);
11334 			while (j++ < 100) {
11335 				pci_read_config_word(tp->pdev, vpd_cap +
11336 						     PCI_VPD_ADDR, &tmp16);
11337 				if (tmp16 & 0x8000)
11338 					break;
11339 				msleep(1);
11340 			}
11341 			if (!(tmp16 & 0x8000))
11342 				goto out_not_found;
11343 
11344 			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11345 					      &tmp);
11346 			v = cpu_to_le32(tmp);
11347 			memcpy(&vpd_data[i], &v, 4);
11348 		}
11349 	}
11350 
11351 	/* Now parse and find the part number. */
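	/* The data is a sequence of large VPD resource descriptors, each
	 * carrying a little-endian 16-bit length: 0x82 (identifier
	 * string) and 0x91 (read/write data) blocks are skipped, and the
	 * part number is the 'PN' keyword inside the 0x90 read-only block.
	 */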
11352 	for (i = 0; i < 254; ) {
11353 		unsigned char val = vpd_data[i];
11354 		unsigned int block_end;
11355 
11356 		if (val == 0x82 || val == 0x91) {
11357 			i = (i + 3 +
11358 			     (vpd_data[i + 1] +
11359 			      (vpd_data[i + 2] << 8)));
11360 			continue;
11361 		}
11362 
11363 		if (val != 0x90)
11364 			goto out_not_found;
11365 
11366 		block_end = (i + 3 +
11367 			     (vpd_data[i + 1] +
11368 			      (vpd_data[i + 2] << 8)));
11369 		i += 3;
11370 
11371 		if (block_end > 256)
11372 			goto out_not_found;
11373 
11374 		while (i < (block_end - 2)) {
11375 			if (vpd_data[i + 0] == 'P' &&
11376 			    vpd_data[i + 1] == 'N') {
11377 				int partno_len = vpd_data[i + 2];
11378 
11379 				i += 3;
11380 				if (partno_len > 24 || (partno_len + i) > 256)
11381 					goto out_not_found;
11382 
11383 				memcpy(tp->board_part_number,
11384 				       &vpd_data[i], partno_len);
11385 
11386 				/* Success. */
11387 				return;
11388 			}
11389 			i += 3 + vpd_data[i + 2];
11390 		}
11391 
11392 		/* Part number not found. */
11393 		goto out_not_found;
11394 	}
11395 
11396 out_not_found:
11397 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11398 		strcpy(tp->board_part_number, "BCM95906");
11399 	else
11400 		strcpy(tp->board_part_number, "none");
11401 }
11402 
11403 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11404 {
11405 	u32 val;
11406 
11407 	if (tg3_nvram_read_swab(tp, offset, &val) ||
11408 	    (val & 0xfc000000) != 0x0c000000 ||
11409 	    tg3_nvram_read_swab(tp, offset + 4, &val) ||
11410 	    val != 0)
11411 		return 0;
11412 
11413 	return 1;
11414 }
11415 
11416 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11417 {
11418 	u32 offset, major, minor, build;
11419 
11420 	tp->fw_ver[0] = 's';
11421 	tp->fw_ver[1] = 'b';
11422 	tp->fw_ver[2] = '\0';
11423 
11424 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11425 		return;
11426 
11427 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11428 	case TG3_EEPROM_SB_REVISION_0:
11429 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11430 		break;
11431 	case TG3_EEPROM_SB_REVISION_2:
11432 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11433 		break;
11434 	case TG3_EEPROM_SB_REVISION_3:
11435 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11436 		break;
11437 	default:
11438 		return;
11439 	}
11440 
11441 	if (tg3_nvram_read_swab(tp, offset, &val))
11442 		return;
11443 
11444 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11445 		TG3_EEPROM_SB_EDH_BLD_SHFT;
11446 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11447 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
11448 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
11449 
11450 	if (minor > 99 || build > 26)
11451 		return;
11452 
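	/* e.g. major 1, minor 4, build 2 formats as "sb v1.04" with a
	 * trailing 'b' appended below.
	 */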
11453 	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11454 
11455 	if (build > 0) {
11456 		tp->fw_ver[8] = 'a' + build - 1;
11457 		tp->fw_ver[9] = '\0';
11458 	}
11459 }
11460 
11461 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11462 {
11463 	u32 val, offset, start;
11464 	u32 ver_offset;
11465 	int i, bcnt;
11466 
11467 	if (tg3_nvram_read_swab(tp, 0, &val))
11468 		return;
11469 
11470 	if (val != TG3_EEPROM_MAGIC) {
11471 		if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
11472 			tg3_read_sb_ver(tp, val);
11473 
11474 		return;
11475 	}
11476 
11477 	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11478 	    tg3_nvram_read_swab(tp, 0x4, &start))
11479 		return;
11480 
11481 	offset = tg3_nvram_logical_addr(tp, offset);
11482 
11483 	if (!tg3_fw_img_is_valid(tp, offset) ||
11484 	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11485 		return;
11486 
11487 	offset = offset + ver_offset - start;
11488 	for (i = 0; i < 16; i += 4) {
11489 		__le32 v;
11490 		if (tg3_nvram_read_le(tp, offset + i, &v))
11491 			return;
11492 
11493 		memcpy(tp->fw_ver + i, &v, 4);
11494 	}
11495 
11496 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11497 	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11498 		return;
11499 
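	/* Scan the NVRAM directory for the ASF init-code entry so its
	 * version string can be appended to the one read above.
	 */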
11500 	for (offset = TG3_NVM_DIR_START;
11501 	     offset < TG3_NVM_DIR_END;
11502 	     offset += TG3_NVM_DIRENT_SIZE) {
11503 		if (tg3_nvram_read_swab(tp, offset, &val))
11504 			return;
11505 
11506 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11507 			break;
11508 	}
11509 
11510 	if (offset == TG3_NVM_DIR_END)
11511 		return;
11512 
11513 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11514 		start = 0x08000000;
11515 	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11516 		return;
11517 
11518 	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11519 	    !tg3_fw_img_is_valid(tp, offset) ||
11520 	    tg3_nvram_read_swab(tp, offset + 8, &val))
11521 		return;
11522 
11523 	offset += val - start;
11524 
11525 	bcnt = strlen(tp->fw_ver);
11526 
11527 	tp->fw_ver[bcnt++] = ',';
11528 	tp->fw_ver[bcnt++] = ' ';
11529 
11530 	for (i = 0; i < 4; i++) {
11531 		__le32 v;
11532 		if (tg3_nvram_read_le(tp, offset, &v))
11533 			return;
11534 
11535 		offset += sizeof(v);
11536 
11537 		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11538 			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11539 			break;
11540 		}
11541 
11542 		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11543 		bcnt += sizeof(v);
11544 	}
11545 
11546 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11547 }
11548 
11549 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11550 
11551 static int __devinit tg3_get_invariants(struct tg3 *tp)
11552 {
11553 	static struct pci_device_id write_reorder_chipsets[] = {
11554 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
11555 		             PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11556 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
11557 		             PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11558 		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
11559 			     PCI_DEVICE_ID_VIA_8385_0) },
11560 		{ },
11561 	};
11562 	u32 misc_ctrl_reg;
11563 	u32 pci_state_reg, grc_misc_cfg;
11564 	u32 val;
11565 	u16 pci_cmd;
11566 	int err;
11567 
11568 	/* Force memory write invalidate off.  If we leave it on,
11569 	 * then on 5700_BX chips we have to enable a workaround.
11570 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11571 	 * to match the cacheline size.  The Broadcom driver has this
11572 	 * workaround but turns MWI off all the time and so never uses
11573 	 * it.  This seems to suggest that the workaround is insufficient.
11574 	 */
11575 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11576 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11577 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11578 
11579 	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11580 	 * has the register indirect write enable bit set before
11581 	 * we try to access any of the MMIO registers.  It is also
11582 	 * critical that the PCI-X hw workaround situation is decided
11583 	 * before that as well.
11584 	 */
11585 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11586 			      &misc_ctrl_reg);
11587 
11588 	tp->pci_chip_rev_id = (misc_ctrl_reg >>
11589 			       MISC_HOST_CTRL_CHIPREV_SHIFT);
11590 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11591 		u32 prod_id_asic_rev;
11592 
11593 		pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11594 				      &prod_id_asic_rev);
11595 		tp->pci_chip_rev_id = prod_id_asic_rev;
11596 	}
11597 
11598 	/* Wrong chip ID in 5752 A0. This code can be removed later
11599 	 * as A0 is not in production.
11600 	 */
11601 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11602 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11603 
11604 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11605 	 * we need to disable memory and use config. cycles
11606 	 * only to access all registers. The 5702/03 chips
11607 	 * can mistakenly decode the special cycles from the
11608 	 * ICH chipsets as memory write cycles, causing corruption
11609 	 * of register and memory space. Only certain ICH bridges
11610 	 * will drive special cycles with non-zero data during the
11611 	 * address phase which can fall within the 5703's address
11612 	 * range. This is not an ICH bug as the PCI spec allows
11613 	 * non-zero address during special cycles. However, only
11614 	 * these ICH bridges are known to drive non-zero addresses
11615 	 * during special cycles.
11616 	 *
11617 	 * Since special cycles do not cross PCI bridges, we only
11618 	 * enable this workaround if the 5703 is on the secondary
11619 	 * bus of these ICH bridges.
11620 	 */
11621 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11622 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11623 		static struct tg3_dev_id {
11624 			u32	vendor;
11625 			u32	device;
11626 			u32	rev;
11627 		} ich_chipsets[] = {
11628 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11629 			  PCI_ANY_ID },
11630 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11631 			  PCI_ANY_ID },
11632 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11633 			  0xa },
11634 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11635 			  PCI_ANY_ID },
11636 			{ },
11637 		};
11638 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
11639 		struct pci_dev *bridge = NULL;
11640 
11641 		while (pci_id->vendor != 0) {
11642 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
11643 						bridge);
11644 			if (!bridge) {
11645 				pci_id++;
11646 				continue;
11647 			}
11648 			if (pci_id->rev != PCI_ANY_ID) {
11649 				if (bridge->revision > pci_id->rev)
11650 					continue;
11651 			}
11652 			if (bridge->subordinate &&
11653 			    (bridge->subordinate->number ==
11654 			     tp->pdev->bus->number)) {
11655 
11656 				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11657 				pci_dev_put(bridge);
11658 				break;
11659 			}
11660 		}
11661 	}
11662 
11663 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11664 		static struct tg3_dev_id {
11665 			u32	vendor;
11666 			u32	device;
11667 		} bridge_chipsets[] = {
11668 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11669 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11670 			{ },
11671 		};
11672 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11673 		struct pci_dev *bridge = NULL;
11674 
11675 		while (pci_id->vendor != 0) {
11676 			bridge = pci_get_device(pci_id->vendor,
11677 						pci_id->device,
11678 						bridge);
11679 			if (!bridge) {
11680 				pci_id++;
11681 				continue;
11682 			}
11683 			if (bridge->subordinate &&
11684 			    (bridge->subordinate->number <=
11685 			     tp->pdev->bus->number) &&
11686 			    (bridge->subordinate->subordinate >=
11687 			     tp->pdev->bus->number)) {
11688 				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11689 				pci_dev_put(bridge);
11690 				break;
11691 			}
11692 		}
11693 	}
11694 
11695 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
11696 	 * DMA addresses > 40-bit. This bridge may have other additional
11697 	 * 57xx devices behind it in some 4-port NIC designs for example.
11698 	 * Any tg3 device found behind the bridge will also need the 40-bit
11699 	 * DMA workaround.
11700 	 */
11701 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11702 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11703 		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11704 		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11705 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11706 	}
11707 	else {
11708 		struct pci_dev *bridge = NULL;
11709 
11710 		do {
11711 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11712 						PCI_DEVICE_ID_SERVERWORKS_EPB,
11713 						bridge);
11714 			if (bridge && bridge->subordinate &&
11715 			    (bridge->subordinate->number <=
11716 			     tp->pdev->bus->number) &&
11717 			    (bridge->subordinate->subordinate >=
11718 			     tp->pdev->bus->number)) {
11719 				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11720 				pci_dev_put(bridge);
11721 				break;
11722 			}
11723 		} while (bridge);
11724 	}
11725 
11726 	/* Initialize misc host control in PCI block. */
11727 	tp->misc_host_ctrl |= (misc_ctrl_reg &
11728 			       MISC_HOST_CTRL_CHIPREV);
11729 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11730 			       tp->misc_host_ctrl);
11731 
11732 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11733 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11734 		tp->pdev_peer = tg3_find_peer(tp);
11735 
11736 	/* Intentionally exclude ASIC_REV_5906 */
11737 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11738 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11739 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11740 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11741 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11742 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11743 		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
11744 
11745 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11746 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11747 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11748 	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11749 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11750 		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11751 
11752 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11753 	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11754 		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11755 
11756 	/* 5700 B0 chips do not support checksumming correctly due
11757 	 * to hardware bugs.
11758 	 */
11759 	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11760 		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11761 	else {
11762 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11763 		tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11764 		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
11765 			tp->dev->features |= NETIF_F_IPV6_CSUM;
11766 	}
11767 
11768 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11769 		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11770 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11771 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11772 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11773 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11774 		     tp->pdev_peer == tp->pdev))
11775 			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11776 
11777 		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11778 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11779 			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11780 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11781 		} else {
11782 			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11783 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11784 				ASIC_REV_5750 &&
11785 			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11786 				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11787 		}
11788 	}
11789 
11790 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11791 	     (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11792 		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11793 
11794 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11795 			      &pci_state_reg);
11796 
11797 	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11798 	if (tp->pcie_cap != 0) {
11799 		u16 lnkctl;
11800 
11801 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11802 
11803 		pcie_set_readrq(tp->pdev, 4096);
11804 
11805 		pci_read_config_word(tp->pdev,
11806 				     tp->pcie_cap + PCI_EXP_LNKCTL,
11807 				     &lnkctl);
11808 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
11809 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11810 				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11811 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11812 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11813 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11814 				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
11815 		}
11816 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
11817 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11818 	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11819 		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11820 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11821 		if (!tp->pcix_cap) {
11822 			printk(KERN_ERR PFX "Cannot find PCI-X "
11823 					    "capability, aborting.\n");
11824 			return -EIO;
11825 		}
11826 
11827 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
11828 			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11829 	}
11830 
11831 	/* If we have an AMD 762 or VIA K8T800 chipset, write
11832 	 * reordering to the mailbox registers done by the host
11833 	 * controller can cause major troubles.  We read back from
11834 	 * every mailbox register write to force the writes to be
11835 	 * posted to the chip in order.
11836 	 */
11837 	if (pci_dev_present(write_reorder_chipsets) &&
11838 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11839 		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11840 
11841 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
11842 			     &tp->pci_cacheline_sz);
11843 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
11844 			     &tp->pci_lat_timer);
11845 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11846 	    tp->pci_lat_timer < 64) {
11847 		tp->pci_lat_timer = 64;
11848 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
11849 				      tp->pci_lat_timer);
11850 	}
11851 
11852 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11853 		/* 5700 BX chips need to have their TX producer index
11854 		 * mailboxes written twice to workaround a bug.
11855 		 */
11856 		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11857 
11858 		/* If we are in PCI-X mode, enable register write workaround.
11859 		 *
11860 		 * The workaround is to use indirect register accesses
11861 		 * for all chip writes not to mailbox registers.
11862 		 */
11863 		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11864 			u32 pm_reg;
11865 
11866 			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11867 
11868 			/* The chip can have its power management PCI config
11869 			 * space registers clobbered due to this bug.
11870 			 * So explicitly force the chip into D0 here.
11871 			 */
11872 			pci_read_config_dword(tp->pdev,
11873 					      tp->pm_cap + PCI_PM_CTRL,
11874 					      &pm_reg);
11875 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11876 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11877 			pci_write_config_dword(tp->pdev,
11878 					       tp->pm_cap + PCI_PM_CTRL,
11879 					       pm_reg);
11880 
11881 			/* Also, force SERR#/PERR# in PCI command. */
11882 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11883 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11884 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11885 		}
11886 	}
11887 
11888 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11889 		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11890 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11891 		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11892 
11893 	/* Chip-specific fixup from Broadcom driver */
11894 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11895 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11896 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11897 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11898 	}
11899 
11900 	/* Default fast path register access methods */
11901 	tp->read32 = tg3_read32;
11902 	tp->write32 = tg3_write32;
11903 	tp->read32_mbox = tg3_read32;
11904 	tp->write32_mbox = tg3_write32;
11905 	tp->write32_tx_mbox = tg3_write32;
11906 	tp->write32_rx_mbox = tg3_write32;
11907 
11908 	/* Various workaround register access methods */
11909 	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11910 		tp->write32 = tg3_write_indirect_reg32;
11911 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11912 		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11913 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11914 		/*
11915 		 * Back to back register writes can cause problems on these
11916 		 * chips, the workaround is to read back all reg writes
11917 		 * except those to mailbox regs.
11918 		 *
11919 		 * See tg3_write_indirect_reg32().
11920 		 */
11921 		tp->write32 = tg3_write_flush_reg32;
11922 	}
11923 
11924 
11925 	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11926 	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11927 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11928 		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11929 			tp->write32_rx_mbox = tg3_write_flush_reg32;
11930 	}
11931 
11932 	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11933 		tp->read32 = tg3_read_indirect_reg32;
11934 		tp->write32 = tg3_write_indirect_reg32;
11935 		tp->read32_mbox = tg3_read_indirect_mbox;
11936 		tp->write32_mbox = tg3_write_indirect_mbox;
11937 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
11938 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
11939 
11940 		iounmap(tp->regs);
11941 		tp->regs = NULL;
11942 
11943 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11944 		pci_cmd &= ~PCI_COMMAND_MEMORY;
11945 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11946 	}
11947 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11948 		tp->read32_mbox = tg3_read32_mbox_5906;
11949 		tp->write32_mbox = tg3_write32_mbox_5906;
11950 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
11951 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
11952 	}
11953 
11954 	if (tp->write32 == tg3_write_indirect_reg32 ||
11955 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11956 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11957 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11958 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11959 
11960 	/* Get eeprom hw config before calling tg3_set_power_state().
11961 	 * In particular, the TG3_FLG2_IS_NIC flag must be
11962 	 * determined before calling tg3_set_power_state() so that
11963 	 * we know whether or not to switch out of Vaux power.
11964 	 * When the flag is set, it means that GPIO1 is used for eeprom
11965 	 * write protect and also implies that it is a LOM where GPIOs
11966 	 * are not used to switch power.
11967 	 */
11968 	tg3_get_eeprom_hw_cfg(tp);
11969 
11970 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11971 		/* Allow reads and writes to the
11972 		 * APE register and memory space.
11973 		 */
11974 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11975 				 PCISTATE_ALLOW_APE_SHMEM_WR;
11976 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11977 				       pci_state_reg);
11978 	}
11979 
11980 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11981 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11982 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11983 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11984 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11985 
11986 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11987 	 * GPIO1 driven high will bring 5700's external PHY out of reset.
11988 	 * It is also used as eeprom write protect on LOMs.
11989 	 */
11990 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11991 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11992 	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11993 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11994 				       GRC_LCLCTRL_GPIO_OUTPUT1);
11995 	/* Unused GPIO3 must be driven as output on 5752 because there
11996 	 * are no pull-up resistors on unused GPIO pins.
11997 	 */
11998 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11999 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12000 
12001 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12002 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12003 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12004 
12005 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12006 		/* Turn off the debug UART. */
12007 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12008 		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12009 			/* Keep VMain power. */
12010 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12011 					      GRC_LCLCTRL_GPIO_OUTPUT0;
12012 	}
12013 
12014 	/* Force the chip into D0. */
12015 	err = tg3_set_power_state(tp, PCI_D0);
12016 	if (err) {
12017 		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12018 		       pci_name(tp->pdev));
12019 		return err;
12020 	}
12021 
12022 	/* Derive initial jumbo mode from MTU assigned in
12023 	 * ether_setup() via the alloc_etherdev() call
12024 	 */
12025 	if (tp->dev->mtu > ETH_DATA_LEN &&
12026 	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12027 		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12028 
12029 	/* Determine WakeOnLan speed to use. */
12030 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12031 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12032 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12033 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12034 		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12035 	} else {
12036 		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12037 	}
12038 
12039 	/* A few boards don't want Ethernet@WireSpeed phy feature */
12040 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12041 	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12042 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12043 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12044 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12045 	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12046 		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12047 
12048 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12049 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12050 		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12051 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12052 		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12053 
12054 	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12055 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12056 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12057 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12058 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12059 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12060 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12061 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12062 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12063 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12064 				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12065 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12066 				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12067 		} else
12068 			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12069 	}
12070 
12071 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12072 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12073 		tp->phy_otp = tg3_read_otp_phycfg(tp);
12074 		if (tp->phy_otp == 0)
12075 			tp->phy_otp = TG3_OTP_DEFAULT;
12076 	}
12077 
12078 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12079 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12080 	else
12081 		tp->mi_mode = MAC_MI_MODE_BASE;
12082 
12083 	tp->coalesce_mode = 0;
12084 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12085 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12086 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12087 
12088 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12089 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12090 		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12091 
12092 	err = tg3_mdio_init(tp);
12093 	if (err)
12094 		return err;
12095 
12096 	/* Initialize data/descriptor byte/word swapping. */
12097 	val = tr32(GRC_MODE);
12098 	val &= GRC_MODE_HOST_STACKUP;
12099 	tw32(GRC_MODE, val | tp->grc_mode);
12100 
12101 	tg3_switch_clocks(tp);
12102 
12103 	/* Clear this out for sanity. */
12104 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12105 
12106 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12107 			      &pci_state_reg);
12108 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12109 	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12110 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12111 
12112 		if (chiprevid == CHIPREV_ID_5701_A0 ||
12113 		    chiprevid == CHIPREV_ID_5701_B0 ||
12114 		    chiprevid == CHIPREV_ID_5701_B2 ||
12115 		    chiprevid == CHIPREV_ID_5701_B5) {
12116 			void __iomem *sram_base;
12117 
12118 			/* Write some dummy words into the SRAM status block
12119 			 * area, see if it reads back correctly.  If the return
12120 			 * value is bad, force enable the PCIX workaround.
12121 			 */
12122 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12123 
12124 			writel(0x00000000, sram_base);
12125 			writel(0x00000000, sram_base + 4);
12126 			writel(0xffffffff, sram_base + 4);
12127 			if (readl(sram_base) != 0x00000000)
12128 				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12129 		}
12130 	}
12131 
12132 	udelay(50);
12133 	tg3_nvram_init(tp);
12134 
12135 	grc_misc_cfg = tr32(GRC_MISC_CFG);
12136 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12137 
12138 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12139 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12140 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12141 		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12142 
12143 	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12144 	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12145 		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12146 	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12147 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12148 				      HOSTCC_MODE_CLRTICK_TXBD);
12149 
12150 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12151 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12152 				       tp->misc_host_ctrl);
12153 	}
12154 
12155 	/* Preserve the APE MAC_MODE bits */
12156 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12157 		tp->mac_mode = tr32(MAC_MODE) |
12158 			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12159 	else
12160 		tp->mac_mode = TG3_DEF_MAC_MODE;
12161 
12162 	/* these are limited to 10/100 only */
12163 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12164 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12165 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12166 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12167 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12168 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12169 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12170 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12171 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12172 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12173 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12174 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12175 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12176 		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12177 
12178 	err = tg3_phy_probe(tp);
12179 	if (err) {
12180 		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12181 		       pci_name(tp->pdev), err);
12182 		/* ... but do not return immediately ... */
12183 		tg3_mdio_fini(tp);
12184 	}
12185 
12186 	tg3_read_partno(tp);
12187 	tg3_read_fw_ver(tp);
12188 
12189 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12190 		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12191 	} else {
12192 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12193 			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12194 		else
12195 			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12196 	}
12197 
12198 	/* 5700 {AX,BX} chips have a broken status block link
12199 	 * change bit implementation, so we must use the
12200 	 * status register in those cases.
12201 	 */
12202 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12203 		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12204 	else
12205 		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12206 
12207 	/* The led_ctrl is set during tg3_phy_probe; here we might
12208 	 * have to force the link status polling mechanism based
12209 	 * upon subsystem IDs.
12210 	 */
12211 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12212 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12213 	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12214 		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12215 				  TG3_FLAG_USE_LINKCHG_REG);
12216 	}
12217 
12218 	/* For all SERDES we poll the MAC status register. */
12219 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12220 		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12221 	else
12222 		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12223 
12224 	tp->rx_offset = NET_IP_ALIGN;
12225 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12226 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12227 		tp->rx_offset = 0;
12228 
12229 	tp->rx_std_max_post = TG3_RX_RING_SIZE;
12230 
12231 	/* Increment the rx prod index on the rx std ring by at most
12232 	 * 8 for these chips to workaround hw errata.
12233 	 */
12234 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12235 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12236 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12237 		tp->rx_std_max_post = 8;
12238 
12239 	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12240 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12241 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
12242 
12243 	return err;
12244 }
12245 
12246 #ifdef CONFIG_SPARC
12247 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12248 {
12249 	struct net_device *dev = tp->dev;
12250 	struct pci_dev *pdev = tp->pdev;
12251 	struct device_node *dp = pci_device_to_OF_node(pdev);
12252 	const unsigned char *addr;
12253 	int len;
12254 
12255 	addr = of_get_property(dp, "local-mac-address", &len);
12256 	if (addr && len == 6) {
12257 		memcpy(dev->dev_addr, addr, 6);
12258 		memcpy(dev->perm_addr, dev->dev_addr, 6);
12259 		return 0;
12260 	}
12261 	return -ENODEV;
12262 }
12263 
12264 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12265 {
12266 	struct net_device *dev = tp->dev;
12267 
12268 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12269 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12270 	return 0;
12271 }
12272 #endif
12273 
12274 static int __devinit tg3_get_device_address(struct tg3 *tp)
12275 {
12276 	struct net_device *dev = tp->dev;
12277 	u32 hi, lo, mac_offset;
12278 	int addr_ok = 0;
12279 
12280 #ifdef CONFIG_SPARC
12281 	if (!tg3_get_macaddr_sparc(tp))
12282 		return 0;
12283 #endif
12284 
12285 	mac_offset = 0x7c;
12286 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12287 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12288 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12289 			mac_offset = 0xcc;
12290 		if (tg3_nvram_lock(tp))
12291 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12292 		else
12293 			tg3_nvram_unlock(tp);
12294 	}
12295 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12296 		mac_offset = 0x10;
12297 
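	/* Editor's note, not in the original source: the bootcode stamps
	 * an ASCII "HK" signature (0x48 0x4b) into the top half of the
	 * high mailbox word, which is what the (hi >> 16) == 0x484b test
	 * below checks.  A worked decode with hypothetical mailbox values
	 * hi = 0x484b0010 and lo = 0x18a2b3c4:
	 *
	 *	dev_addr[0] = (hi >>  8) & 0xff = 0x00
	 *	dev_addr[1] = (hi >>  0) & 0xff = 0x10
	 *	dev_addr[2] = (lo >> 24) & 0xff = 0x18
	 *	dev_addr[3] = (lo >> 16) & 0xff = 0xa2
	 *	dev_addr[4] = (lo >>  8) & 0xff = 0xb3
	 *	dev_addr[5] = (lo >>  0) & 0xff = 0xc4
	 *
	 * i.e. 00:10:18:a2:b3:c4.  Note that the NVRAM fallback further
	 * down extracts the bytes in a different order than the mailbox.
	 */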
12298 	/* First try to get it from MAC address mailbox. */
12299 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12300 	if ((hi >> 16) == 0x484b) {
12301 		dev->dev_addr[0] = (hi >>  8) & 0xff;
12302 		dev->dev_addr[1] = (hi >>  0) & 0xff;
12303 
12304 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12305 		dev->dev_addr[2] = (lo >> 24) & 0xff;
12306 		dev->dev_addr[3] = (lo >> 16) & 0xff;
12307 		dev->dev_addr[4] = (lo >>  8) & 0xff;
12308 		dev->dev_addr[5] = (lo >>  0) & 0xff;
12309 
12310 		/* Some old bootcode may report a 0 MAC address in SRAM */
12311 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12312 	}
12313 	if (!addr_ok) {
12314 		/* Next, try NVRAM. */
12315 		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12316 		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12317 			dev->dev_addr[0] = ((hi >> 16) & 0xff);
12318 			dev->dev_addr[1] = ((hi >> 24) & 0xff);
12319 			dev->dev_addr[2] = ((lo >>  0) & 0xff);
12320 			dev->dev_addr[3] = ((lo >>  8) & 0xff);
12321 			dev->dev_addr[4] = ((lo >> 16) & 0xff);
12322 			dev->dev_addr[5] = ((lo >> 24) & 0xff);
12323 		}
12324 		/* Finally just fetch it out of the MAC control regs. */
12325 		else {
12326 			hi = tr32(MAC_ADDR_0_HIGH);
12327 			lo = tr32(MAC_ADDR_0_LOW);
12328 
12329 			dev->dev_addr[5] = lo & 0xff;
12330 			dev->dev_addr[4] = (lo >> 8) & 0xff;
12331 			dev->dev_addr[3] = (lo >> 16) & 0xff;
12332 			dev->dev_addr[2] = (lo >> 24) & 0xff;
12333 			dev->dev_addr[1] = hi & 0xff;
12334 			dev->dev_addr[0] = (hi >> 8) & 0xff;
12335 		}
12336 	}
12337 
12338 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12339 #ifdef CONFIG_SPARC
12340 		if (!tg3_get_default_macaddr_sparc(tp))
12341 			return 0;
12342 #endif
12343 		return -EINVAL;
12344 	}
12345 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12346 	return 0;
12347 }
12348 
12349 #define BOUNDARY_SINGLE_CACHELINE	1
12350 #define BOUNDARY_MULTI_CACHELINE	2
12351 
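/* Editor's note, a summary that is not in the original source:
 * tg3_calc_dma_bndry() folds the host cache-line size into the
 * read/write boundary fields of DMA_RWCTRL.  PCI_CACHE_LINE_SIZE
 * counts 32-bit words, so cacheline_size = byte * 4 (a byte value of
 * 16 means a 64-byte line), and a value of 0 is treated as a
 * 1024-byte line.  The "goal" then picks whether DMA bursts should
 * stop at a single cache line or may span several, depending on the
 * host architecture.
 */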
12352 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12353 {
12354 	int cacheline_size;
12355 	u8 byte;
12356 	int goal;
12357 
12358 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12359 	if (byte == 0)
12360 		cacheline_size = 1024;
12361 	else
12362 		cacheline_size = (int) byte * 4;
12363 
12364 	/* On 5703 and later chips, the boundary bits have no
12365 	 * effect.
12366 	 */
12367 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12368 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12369 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12370 		goto out;
12371 
12372 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12373 	goal = BOUNDARY_MULTI_CACHELINE;
12374 #else
12375 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12376 	goal = BOUNDARY_SINGLE_CACHELINE;
12377 #else
12378 	goal = 0;
12379 #endif
12380 #endif
12381 
12382 	if (!goal)
12383 		goto out;
12384 
12385 	/* PCI controllers on most RISC systems tend to disconnect
12386 	 * when a device tries to burst across a cache-line boundary.
12387 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12388 	 *
12389 	 * Unfortunately, for PCI-E there are only limited
12390 	 * write-side controls for this, and thus for reads
12391 	 * we will still get the disconnects.  We'll also waste
12392 	 * these PCI cycles for both read and write for chips
12393 	 * other than 5700 and 5701 which do not implement the
12394 	 * boundary bits.
12395 	 */
12396 	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12397 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12398 		switch (cacheline_size) {
12399 		case 16:
12400 		case 32:
12401 		case 64:
12402 		case 128:
12403 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12404 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12405 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12406 			} else {
12407 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12408 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12409 			}
12410 			break;
12411 
12412 		case 256:
12413 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12414 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12415 			break;
12416 
12417 		default:
12418 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12419 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12420 			break;
12421 		}
12422 	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12423 		switch (cacheline_size) {
12424 		case 16:
12425 		case 32:
12426 		case 64:
12427 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12428 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12429 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12430 				break;
12431 			}
12432 			/* fallthrough */
12433 		case 128:
12434 		default:
12435 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12436 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12437 			break;
12438 		}
12439 	} else {
12440 		switch (cacheline_size) {
12441 		case 16:
12442 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12443 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
12444 					DMA_RWCTRL_WRITE_BNDRY_16);
12445 				break;
12446 			}
12447 			/* fallthrough */
12448 		case 32:
12449 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12450 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
12451 					DMA_RWCTRL_WRITE_BNDRY_32);
12452 				break;
12453 			}
12454 			/* fallthrough */
12455 		case 64:
12456 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12457 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
12458 					DMA_RWCTRL_WRITE_BNDRY_64);
12459 				break;
12460 			}
12461 			/* fallthrough */
12462 		case 128:
12463 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
12464 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
12465 					DMA_RWCTRL_WRITE_BNDRY_128);
12466 				break;
12467 			}
12468 			/* fallthrough */
12469 		case 256:
12470 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
12471 				DMA_RWCTRL_WRITE_BNDRY_256);
12472 			break;
12473 		case 512:
12474 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
12475 				DMA_RWCTRL_WRITE_BNDRY_512);
12476 			break;
12477 		case 1024:
12478 		default:
12479 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12480 				DMA_RWCTRL_WRITE_BNDRY_1024);
12481 			break;
12482 		}
12483 	}
12484 
12485 out:
12486 	return val;
12487 }
12488 
12489 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12490 {
12491 	struct tg3_internal_buffer_desc test_desc;
12492 	u32 sram_dma_descs;
12493 	int i, ret;
12494 
12495 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12496 
12497 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12498 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12499 	tw32(RDMAC_STATUS, 0);
12500 	tw32(WDMAC_STATUS, 0);
12501 
12502 	tw32(BUFMGR_MODE, 0);
12503 	tw32(FTQ_RESET, 0);
12504 
12505 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
12506 	test_desc.addr_lo = buf_dma & 0xffffffff;
12507 	test_desc.nic_mbuf = 0x00002100;
12508 	test_desc.len = size;
12509 
12510 	/*
12511 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12512 	 * the *second* time the tg3 driver was getting loaded after an
12513 	 * initial scan.
12514 	 *
12515 	 * Broadcom tells me:
12516 	 *   ...the DMA engine is connected to the GRC block and a DMA
12517 	 *   reset may affect the GRC block in some unpredictable way...
12518 	 *   The behavior of resets to individual blocks has not been tested.
12519 	 *
12520 	 * Broadcom noted the GRC reset will also reset all sub-components.
12521 	 */
12522 	if (to_device) {
12523 		test_desc.cqid_sqid = (13 << 8) | 2;
12524 
12525 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12526 		udelay(40);
12527 	} else {
12528 		test_desc.cqid_sqid = (16 << 8) | 7;
12529 
12530 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12531 		udelay(40);
12532 	}
12533 	test_desc.flags = 0x00000005;
12534 
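	/* Editor's note, not in the original source: the descriptor is
	 * written into NIC SRAM indirectly through PCI config space.
	 * TG3PCI_MEM_WIN_BASE_ADDR selects the SRAM address and
	 * TG3PCI_MEM_WIN_DATA carries one 32-bit word, so the loop below
	 * copies the descriptor one word per config write and then closes
	 * the window by restoring the base address to zero.
	 */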
12535 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12536 		u32 val;
12537 
12538 		val = *(((u32 *)&test_desc) + i);
12539 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12540 				       sram_dma_descs + (i * sizeof(u32)));
12541 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12542 	}
12543 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12544 
12545 	if (to_device) {
12546 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12547 	} else {
12548 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12549 	}
12550 
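	/* Editor's note, not in the original source: completion is
	 * detected by polling the matching FTQ completion FIFO until its
	 * low 16 bits echo the descriptor's SRAM address, for at most
	 * 40 iterations of udelay(100), i.e. roughly 4ms, before giving
	 * up with -ENODEV.
	 */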
12551 	ret = -ENODEV;
12552 	for (i = 0; i < 40; i++) {
12553 		u32 val;
12554 
12555 		if (to_device)
12556 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12557 		else
12558 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12559 		if ((val & 0xffff) == sram_dma_descs) {
12560 			ret = 0;
12561 			break;
12562 		}
12563 
12564 		udelay(100);
12565 	}
12566 
12567 	return ret;
12568 }
12569 
12570 #define TEST_BUFFER_SIZE	0x2000
12571 
12572 static int __devinit tg3_test_dma(struct tg3 *tp)
12573 {
12574 	dma_addr_t buf_dma;
12575 	u32 *buf, saved_dma_rwctrl;
12576 	int ret;
12577 
12578 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12579 	if (!buf) {
12580 		ret = -ENOMEM;
12581 		goto out_nofree;
12582 	}
12583 
12584 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12585 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12586 
12587 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12588 
12589 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12590 		/* DMA read watermark not used on PCIE */
12591 		tp->dma_rwctrl |= 0x00180000;
12592 	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12593 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12594 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12595 			tp->dma_rwctrl |= 0x003f0000;
12596 		else
12597 			tp->dma_rwctrl |= 0x003f000f;
12598 	} else {
12599 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12600 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12601 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12602 			u32 read_water = 0x7;
12603 
12604 			/* If the 5704 is behind the EPB bridge, we can
12605 			 * do the less restrictive ONE_DMA workaround for
12606 			 * better performance.
12607 			 */
12608 			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12609 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12610 				tp->dma_rwctrl |= 0x8000;
12611 			else if (ccval == 0x6 || ccval == 0x7)
12612 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12613 
12614 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12615 				read_water = 4;
12616 			/* Set bit 23 to enable PCIX hw bug fix */
12617 			tp->dma_rwctrl |=
12618 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12619 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12620 				(1 << 23);
12621 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12622 			/* 5780 always in PCIX mode */
12623 			tp->dma_rwctrl |= 0x00144000;
12624 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12625 			/* 5714 always in PCIX mode */
12626 			tp->dma_rwctrl |= 0x00148000;
12627 		} else {
12628 			tp->dma_rwctrl |= 0x001b000f;
12629 		}
12630 	}
12631 
12632 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12633 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12634 		tp->dma_rwctrl &= 0xfffffff0;
12635 
12636 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12637 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12638 		/* Remove this if it causes problems for some boards. */
12639 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12640 
12641 		/* On 5700/5701 chips, we need to set this bit.
12642 		 * Otherwise the chip will issue cacheline transactions
12643 		 * to streamable DMA memory with not all the byte
12644 		 * enables turned on.  This is an error on several
12645 		 * RISC PCI controllers, in particular sparc64.
12646 		 *
12647 		 * On 5703/5704 chips, this bit has been reassigned
12648 		 * a different meaning.  In particular, it is used
12649 		 * on those chips to enable a PCI-X workaround.
12650 		 */
12651 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12652 	}
12653 
12654 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12655 
12656 #if 0
12657 	/* Unneeded, already done by tg3_get_invariants.  */
12658 	tg3_switch_clocks(tp);
12659 #endif
12660 
12661 	ret = 0;
12662 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12663 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12664 		goto out;
12665 
12666 	/* It is best to perform DMA test with maximum write burst size
12667 	 * to expose the 5700/5701 write DMA bug.
12668 	 */
12669 	saved_dma_rwctrl = tp->dma_rwctrl;
12670 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12671 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12672 
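	/* Editor's note, not in the original source: each pass of the
	 * loop below fills the host buffer with the pattern p[i] = i,
	 * DMAs it to NIC SRAM, DMAs it back, and compares.  On a
	 * mismatch the write boundary is clamped to 16 bytes and the
	 * whole test is retried; if corruption persists even at the
	 * 16-byte boundary, the chip is declared broken with -ENODEV.
	 */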
12673 	while (1) {
12674 		u32 *p = buf, i;
12675 
12676 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12677 			p[i] = i;
12678 
12679 		/* Send the buffer to the chip. */
12680 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12681 		if (ret) {
12682 			printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err %d\n", ret);
12683 			break;
12684 		}
12685 
12686 #if 0
12687 		/* validate data reached card RAM correctly. */
12688 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12689 			u32 val;
12690 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
12691 			if (le32_to_cpu(val) != p[i]) {
12692 				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
12693 				/* ret = -ENODEV here? */
12694 			}
12695 			p[i] = 0;
12696 		}
12697 #endif
12698 		/* Now read it back. */
12699 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12700 		if (ret) {
12701 			printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err %d\n", ret);
12702 
12703 			break;
12704 		}
12705 
12706 		/* Verify it. */
12707 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12708 			if (p[i] == i)
12709 				continue;
12710 
12711 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12712 			    DMA_RWCTRL_WRITE_BNDRY_16) {
12713 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12714 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12715 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12716 				break;
12717 			} else {
12718 				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12719 				ret = -ENODEV;
12720 				goto out;
12721 			}
12722 		}
12723 
12724 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12725 			/* Success. */
12726 			ret = 0;
12727 			break;
12728 		}
12729 	}
12730 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12731 	    DMA_RWCTRL_WRITE_BNDRY_16) {
12732 		static struct pci_device_id dma_wait_state_chipsets[] = {
12733 			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12734 				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12735 			{ },
12736 		};
12737 
12738 		/* DMA test passed without adjusting DMA boundary,
12739 		 * now look for chipsets that are known to expose the
12740 		 * DMA bug without failing the test.
12741 		 */
12742 		if (pci_dev_present(dma_wait_state_chipsets)) {
12743 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12744 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12745 		}
12746 		else
12747 			/* Safe to use the calculated DMA boundary. */
12748 			tp->dma_rwctrl = saved_dma_rwctrl;
12749 
12750 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12751 	}
12752 
12753 out:
12754 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12755 out_nofree:
12756 	return ret;
12757 }
12758 
12759 static void __devinit tg3_init_link_config(struct tg3 *tp)
12760 {
12761 	tp->link_config.advertising =
12762 		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12763 		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12764 		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12765 		 ADVERTISED_Autoneg | ADVERTISED_MII);
12766 	tp->link_config.speed = SPEED_INVALID;
12767 	tp->link_config.duplex = DUPLEX_INVALID;
12768 	tp->link_config.autoneg = AUTONEG_ENABLE;
12769 	tp->link_config.active_speed = SPEED_INVALID;
12770 	tp->link_config.active_duplex = DUPLEX_INVALID;
12771 	tp->link_config.phy_is_low_power = 0;
12772 	tp->link_config.orig_speed = SPEED_INVALID;
12773 	tp->link_config.orig_duplex = DUPLEX_INVALID;
12774 	tp->link_config.orig_autoneg = AUTONEG_INVALID;
12775 }
12776 
12777 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12778 {
12779 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12780 		tp->bufmgr_config.mbuf_read_dma_low_water =
12781 			DEFAULT_MB_RDMA_LOW_WATER_5705;
12782 		tp->bufmgr_config.mbuf_mac_rx_low_water =
12783 			DEFAULT_MB_MACRX_LOW_WATER_5705;
12784 		tp->bufmgr_config.mbuf_high_water =
12785 			DEFAULT_MB_HIGH_WATER_5705;
12786 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12787 			tp->bufmgr_config.mbuf_mac_rx_low_water =
12788 				DEFAULT_MB_MACRX_LOW_WATER_5906;
12789 			tp->bufmgr_config.mbuf_high_water =
12790 				DEFAULT_MB_HIGH_WATER_5906;
12791 		}
12792 
12793 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12794 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12795 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12796 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12797 		tp->bufmgr_config.mbuf_high_water_jumbo =
12798 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12799 	} else {
12800 		tp->bufmgr_config.mbuf_read_dma_low_water =
12801 			DEFAULT_MB_RDMA_LOW_WATER;
12802 		tp->bufmgr_config.mbuf_mac_rx_low_water =
12803 			DEFAULT_MB_MACRX_LOW_WATER;
12804 		tp->bufmgr_config.mbuf_high_water =
12805 			DEFAULT_MB_HIGH_WATER;
12806 
12807 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12808 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12809 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12810 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12811 		tp->bufmgr_config.mbuf_high_water_jumbo =
12812 			DEFAULT_MB_HIGH_WATER_JUMBO;
12813 	}
12814 
12815 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12816 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12817 }
12818 
12819 static char * __devinit tg3_phy_string(struct tg3 *tp)
12820 {
12821 	switch (tp->phy_id & PHY_ID_MASK) {
12822 	case PHY_ID_BCM5400:	return "5400";
12823 	case PHY_ID_BCM5401:	return "5401";
12824 	case PHY_ID_BCM5411:	return "5411";
12825 	case PHY_ID_BCM5701:	return "5701";
12826 	case PHY_ID_BCM5703:	return "5703";
12827 	case PHY_ID_BCM5704:	return "5704";
12828 	case PHY_ID_BCM5705:	return "5705";
12829 	case PHY_ID_BCM5750:	return "5750";
12830 	case PHY_ID_BCM5752:	return "5752";
12831 	case PHY_ID_BCM5714:	return "5714";
12832 	case PHY_ID_BCM5780:	return "5780";
12833 	case PHY_ID_BCM5755:	return "5755";
12834 	case PHY_ID_BCM5787:	return "5787";
12835 	case PHY_ID_BCM5784:	return "5784";
12836 	case PHY_ID_BCM5756:	return "5722/5756";
12837 	case PHY_ID_BCM5906:	return "5906";
12838 	case PHY_ID_BCM5761:	return "5761";
12839 	case PHY_ID_BCM8002:	return "8002/serdes";
12840 	case 0:			return "serdes";
12841 	default:		return "unknown";
12842 	}
12843 }
12844 
12845 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12846 {
12847 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12848 		strcpy(str, "PCI Express");
12849 		return str;
12850 	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12851 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12852 
12853 		strcpy(str, "PCIX:");
12854 
12855 		if ((clock_ctrl == 7) ||
12856 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12857 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12858 			strcat(str, "133MHz");
12859 		else if (clock_ctrl == 0)
12860 			strcat(str, "33MHz");
12861 		else if (clock_ctrl == 2)
12862 			strcat(str, "50MHz");
12863 		else if (clock_ctrl == 4)
12864 			strcat(str, "66MHz");
12865 		else if (clock_ctrl == 6)
12866 			strcat(str, "100MHz");
12867 	} else {
12868 		strcpy(str, "PCI:");
12869 		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12870 			strcat(str, "66MHz");
12871 		else
12872 			strcat(str, "33MHz");
12873 	}
12874 	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12875 		strcat(str, ":32-bit");
12876 	else
12877 		strcat(str, ":64-bit");
12878 	return str;
12879 }
12880 
12881 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12882 {
12883 	struct pci_dev *peer;
12884 	unsigned int func, devnr = tp->pdev->devfn & ~7;
12885 
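	/* Editor's note, not in the original source: PCI packs the device
	 * number in devfn bits 7:3 and the function number in bits 2:0,
	 * so (devfn & ~7) is function 0 of this slot and the loop below
	 * probes functions 0-7 for the other port of a dual-port chip
	 * such as the 5704.
	 */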
12886 	for (func = 0; func < 8; func++) {
12887 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
12888 		if (peer && peer != tp->pdev)
12889 			break;
12890 		pci_dev_put(peer);
12891 	}
12892 	/* 5704 can be configured in single-port mode, set peer to
12893 	 * tp->pdev in that case.
12894 	 */
12895 	if (!peer) {
12896 		peer = tp->pdev;
12897 		return peer;
12898 	}
12899 
12900 	/*
12901 	 * We don't need to keep the refcount elevated; there's no way
12902 	 * to remove one half of this device without removing the other
12903 	 */
12904 	pci_dev_put(peer);
12905 
12906 	return peer;
12907 }
12908 
12909 static void __devinit tg3_init_coal(struct tg3 *tp)
12910 {
12911 	struct ethtool_coalesce *ec = &tp->coal;
12912 
12913 	memset(ec, 0, sizeof(*ec));
12914 	ec->cmd = ETHTOOL_GCOALESCE;
12915 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12916 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12917 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12918 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12919 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12920 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12921 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12922 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12923 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12924 
12925 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12926 				 HOSTCC_MODE_CLRTICK_TXBD)) {
12927 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12928 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12929 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12930 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12931 	}
12932 
12933 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12934 		ec->rx_coalesce_usecs_irq = 0;
12935 		ec->tx_coalesce_usecs_irq = 0;
12936 		ec->stats_block_coalesce_usecs = 0;
12937 	}
12938 }
12939 
12940 static const struct net_device_ops tg3_netdev_ops = {
12941 	.ndo_open		= tg3_open,
12942 	.ndo_stop		= tg3_close,
12943 	.ndo_start_xmit		= tg3_start_xmit,
12944 	.ndo_get_stats		= tg3_get_stats,
12945 	.ndo_validate_addr	= eth_validate_addr,
12946 	.ndo_set_multicast_list	= tg3_set_rx_mode,
12947 	.ndo_set_mac_address	= tg3_set_mac_addr,
12948 	.ndo_do_ioctl		= tg3_ioctl,
12949 	.ndo_tx_timeout		= tg3_tx_timeout,
12950 	.ndo_change_mtu		= tg3_change_mtu,
12951 #if TG3_VLAN_TAG_USED
12952 	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
12953 #endif
12954 #ifdef CONFIG_NET_POLL_CONTROLLER
12955 	.ndo_poll_controller	= tg3_poll_controller,
12956 #endif
12957 };
12958 
12959 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
12960 	.ndo_open		= tg3_open,
12961 	.ndo_stop		= tg3_close,
12962 	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
12963 	.ndo_get_stats		= tg3_get_stats,
12964 	.ndo_validate_addr	= eth_validate_addr,
12965 	.ndo_set_multicast_list	= tg3_set_rx_mode,
12966 	.ndo_set_mac_address	= tg3_set_mac_addr,
12967 	.ndo_do_ioctl		= tg3_ioctl,
12968 	.ndo_tx_timeout		= tg3_tx_timeout,
12969 	.ndo_change_mtu		= tg3_change_mtu,
12970 #if TG3_VLAN_TAG_USED
12971 	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
12972 #endif
12973 #ifdef CONFIG_NET_POLL_CONTROLLER
12974 	.ndo_poll_controller	= tg3_poll_controller,
12975 #endif
12976 };
12977 
12978 static int __devinit tg3_init_one(struct pci_dev *pdev,
12979 				  const struct pci_device_id *ent)
12980 {
12981 	static int tg3_version_printed = 0;
12982 	struct net_device *dev;
12983 	struct tg3 *tp;
12984 	int err, pm_cap;
12985 	char str[40];
12986 	u64 dma_mask, persist_dma_mask;
12987 
12988 	if (tg3_version_printed++ == 0)
12989 		printk(KERN_INFO "%s", version);
12990 
12991 	err = pci_enable_device(pdev);
12992 	if (err) {
12993 		printk(KERN_ERR PFX "Cannot enable PCI device, "
12994 		       "aborting.\n");
12995 		return err;
12996 	}
12997 
12998 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
12999 	if (err) {
13000 		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13001 		       "aborting.\n");
13002 		goto err_out_disable_pdev;
13003 	}
13004 
13005 	pci_set_master(pdev);
13006 
13007 	/* Find power-management capability. */
13008 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13009 	if (pm_cap == 0) {
13010 		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13011 		       "aborting.\n");
13012 		err = -EIO;
13013 		goto err_out_free_res;
13014 	}
13015 
13016 	dev = alloc_etherdev(sizeof(*tp));
13017 	if (!dev) {
13018 		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13019 		err = -ENOMEM;
13020 		goto err_out_free_res;
13021 	}
13022 
13023 	SET_NETDEV_DEV(dev, &pdev->dev);
13024 
13025 #if TG3_VLAN_TAG_USED
13026 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13027 #endif
13028 
13029 	tp = netdev_priv(dev);
13030 	tp->pdev = pdev;
13031 	tp->dev = dev;
13032 	tp->pm_cap = pm_cap;
13033 	tp->rx_mode = TG3_DEF_RX_MODE;
13034 	tp->tx_mode = TG3_DEF_TX_MODE;
13035 
13036 	if (tg3_debug > 0)
13037 		tp->msg_enable = tg3_debug;
13038 	else
13039 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
13040 
13041 	/* The word/byte swap controls here control register access byte
13042 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13043 	 * setting below.
13044 	 */
13045 	tp->misc_host_ctrl =
13046 		MISC_HOST_CTRL_MASK_PCI_INT |
13047 		MISC_HOST_CTRL_WORD_SWAP |
13048 		MISC_HOST_CTRL_INDIR_ACCESS |
13049 		MISC_HOST_CTRL_PCISTATE_RW;
13050 
13051 	/* The NONFRM (non-frame) byte/word swap controls take effect
13052 	 * on descriptor entries, anything which isn't packet data.
13053 	 *
13054 	 * The StrongARM chips on the board (one for tx, one for rx)
13055 	 * are running in big-endian mode.
13056 	 */
13057 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13058 			GRC_MODE_WSWAP_NONFRM_DATA);
13059 #ifdef __BIG_ENDIAN
13060 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13061 #endif
13062 	spin_lock_init(&tp->lock);
13063 	spin_lock_init(&tp->indirect_lock);
13064 	INIT_WORK(&tp->reset_task, tg3_reset_task);
13065 
13066 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
13067 	if (!tp->regs) {
13068 		printk(KERN_ERR PFX "Cannot map device registers, "
13069 		       "aborting.\n");
13070 		err = -ENOMEM;
13071 		goto err_out_free_dev;
13072 	}
13073 
13074 	tg3_init_link_config(tp);
13075 
13076 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13077 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13078 	tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13079 
13080 	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13081 	dev->ethtool_ops = &tg3_ethtool_ops;
13082 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
13083 	dev->irq = pdev->irq;
13084 
13085 	err = tg3_get_invariants(tp);
13086 	if (err) {
13087 		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13088 		       "aborting.\n");
13089 		goto err_out_iounmap;
13090 	}
13091 
13092 	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13093 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13094 		dev->netdev_ops = &tg3_netdev_ops;
13095 	else
13096 		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13097 
13098 
13099 	/* The EPB bridge inside 5714, 5715, and 5780 and any
13100 	 * device behind the EPB cannot support DMA addresses > 40-bit.
13101 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13102 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13103 	 * do DMA address check in tg3_start_xmit().
13104 	 */
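	/* Editor's note with illustrative values, not in the original
	 * source: the masks chosen below expand to
	 *
	 *	DMA_32BIT_MASK = 0x00000000ffffffffULL
	 *	DMA_40BIT_MASK = 0x000000ffffffffffULL
	 *	DMA_64BIT_MASK = 0xffffffffffffffffULL
	 *
	 * so a 5788-class chip is limited to the low 4GB, EPB-bridged
	 * parts to the low 1TB, and everything else may DMA anywhere.
	 */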
13105 	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13106 		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13107 	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13108 		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13109 #ifdef CONFIG_HIGHMEM
13110 		dma_mask = DMA_64BIT_MASK;
13111 #endif
13112 	} else
13113 		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13114 
13115 	/* Configure DMA attributes. */
13116 	if (dma_mask > DMA_32BIT_MASK) {
13117 		err = pci_set_dma_mask(pdev, dma_mask);
13118 		if (!err) {
13119 			dev->features |= NETIF_F_HIGHDMA;
13120 			err = pci_set_consistent_dma_mask(pdev,
13121 							  persist_dma_mask);
13122 			if (err < 0) {
13123 				printk(KERN_ERR PFX "Unable to obtain 64 bit "
13124 				       "DMA for consistent allocations\n");
13125 				goto err_out_iounmap;
13126 			}
13127 		}
13128 	}
13129 	if (err || dma_mask == DMA_32BIT_MASK) {
13130 		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13131 		if (err) {
13132 			printk(KERN_ERR PFX "No usable DMA configuration, "
13133 			       "aborting.\n");
13134 			goto err_out_iounmap;
13135 		}
13136 	}
13137 
13138 	tg3_init_bufmgr_config(tp);
13139 
13140 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13141 		tp->fw_needed = FIRMWARE_TG3;
13142 
13143 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13144 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13145 	}
13146 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13147 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13148 	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13149 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13150 	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13151 		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13152 	} else {
13153 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13154 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13155 			tp->fw_needed = FIRMWARE_TG3TSO5;
13156 		else
13157 			tp->fw_needed = FIRMWARE_TG3TSO;
13158 	}
13159 
13160 	/* TSO is on by default on chips that support hardware TSO.
13161 	 * Firmware TSO on older chips gives lower performance, so it
13162 	 * is off by default, but can be enabled using ethtool.
13163 	 */
13164 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13165 		if (dev->features & NETIF_F_IP_CSUM)
13166 			dev->features |= NETIF_F_TSO;
13167 		if ((dev->features & NETIF_F_IPV6_CSUM) &&
13168 		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
13169 			dev->features |= NETIF_F_TSO6;
13170 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13171 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13172 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13173 			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13174 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13175 			dev->features |= NETIF_F_TSO_ECN;
13176 	}
13177 
13178 
13179 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13180 	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13181 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13182 		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13183 		tp->rx_pending = 63;
13184 	}
13185 
13186 	err = tg3_get_device_address(tp);
13187 	if (err) {
13188 		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13189 		       "aborting.\n");
13190 		goto err_out_fw;
13191 	}
13192 
13193 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13194 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13195 		if (!tp->aperegs) {
13196 			printk(KERN_ERR PFX "Cannot map APE registers, "
13197 			       "aborting.\n");
13198 			err = -ENOMEM;
13199 			goto err_out_fw;
13200 		}
13201 
13202 		tg3_ape_lock_init(tp);
13203 	}
13204 
13205 	/*
13206 	 * Reset chip in case UNDI or EFI driver did not shutdown
13207 	 * DMA.  The self test will enable WDMAC and we'll see (spurious)
13208 	 * pending DMA on the PCI bus at that point.
13209 	 */
13210 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13211 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13212 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13213 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13214 	}
13215 
13216 	err = tg3_test_dma(tp);
13217 	if (err) {
13218 		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13219 		goto err_out_apeunmap;
13220 	}
13221 
13222 	/* flow control autonegotiation is default behavior */
13223 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13224 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13225 
13226 	tg3_init_coal(tp);
13227 
13228 	pci_set_drvdata(pdev, dev);
13229 
13230 	err = register_netdev(dev);
13231 	if (err) {
13232 		printk(KERN_ERR PFX "Cannot register net device, "
13233 		       "aborting.\n");
13234 		goto err_out_apeunmap;
13235 	}
13236 
13237 	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13238 	       dev->name,
13239 	       tp->board_part_number,
13240 	       tp->pci_chip_rev_id,
13241 	       tg3_bus_string(tp, str),
13242 	       dev->dev_addr);
13243 
13244 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13245 		printk(KERN_INFO
13246 		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13247 		       tp->dev->name,
13248 		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13249 		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13250 	else
13251 		printk(KERN_INFO
13252 		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13253 		       tp->dev->name, tg3_phy_string(tp),
13254 		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13255 			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13256 			 "10/100/1000Base-T")),
13257 		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13258 
13259 	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13260 	       dev->name,
13261 	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13262 	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13263 	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13264 	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13265 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13266 	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13267 	       dev->name, tp->dma_rwctrl,
13268 	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13269 	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13270 
13271 	return 0;
13272 
13273 err_out_apeunmap:
13274 	if (tp->aperegs) {
13275 		iounmap(tp->aperegs);
13276 		tp->aperegs = NULL;
13277 	}
13278 
13279 err_out_fw:
13280 	if (tp->fw)
13281 		release_firmware(tp->fw);
13282 
13283 err_out_iounmap:
13284 	if (tp->regs) {
13285 		iounmap(tp->regs);
13286 		tp->regs = NULL;
13287 	}
13288 
13289 err_out_free_dev:
13290 	free_netdev(dev);
13291 
13292 err_out_free_res:
13293 	pci_release_regions(pdev);
13294 
13295 err_out_disable_pdev:
13296 	pci_disable_device(pdev);
13297 	pci_set_drvdata(pdev, NULL);
13298 	return err;
13299 }
13300 
13301 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13302 {
13303 	struct net_device *dev = pci_get_drvdata(pdev);
13304 
13305 	if (dev) {
13306 		struct tg3 *tp = netdev_priv(dev);
13307 
13308 		if (tp->fw)
13309 			release_firmware(tp->fw);
13310 
13311 		flush_scheduled_work();
13312 
13313 		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13314 			tg3_phy_fini(tp);
13315 			tg3_mdio_fini(tp);
13316 		}
13317 
13318 		unregister_netdev(dev);
13319 		if (tp->aperegs) {
13320 			iounmap(tp->aperegs);
13321 			tp->aperegs = NULL;
13322 		}
13323 		if (tp->regs) {
13324 			iounmap(tp->regs);
13325 			tp->regs = NULL;
13326 		}
13327 		free_netdev(dev);
13328 		pci_release_regions(pdev);
13329 		pci_disable_device(pdev);
13330 		pci_set_drvdata(pdev, NULL);
13331 	}
13332 }
13333 
13334 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13335 {
13336 	struct net_device *dev = pci_get_drvdata(pdev);
13337 	struct tg3 *tp = netdev_priv(dev);
13338 	pci_power_t target_state;
13339 	int err;
13340 
13341 	/* PCI register 4 needs to be saved whether netif_running() or not.
13342 	 * MSI address and data need to be saved if using MSI and
13343 	 * netif_running().
13344 	 */
13345 	pci_save_state(pdev);
13346 
13347 	if (!netif_running(dev))
13348 		return 0;
13349 
13350 	flush_scheduled_work();
13351 	tg3_phy_stop(tp);
13352 	tg3_netif_stop(tp);
13353 
13354 	del_timer_sync(&tp->timer);
13355 
13356 	tg3_full_lock(tp, 1);
13357 	tg3_disable_ints(tp);
13358 	tg3_full_unlock(tp);
13359 
13360 	netif_device_detach(dev);
13361 
13362 	tg3_full_lock(tp, 0);
13363 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13364 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13365 	tg3_full_unlock(tp);
13366 
13367 	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13368 
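	/* Editor's note, not in the original source: if the power-state
	 * transition below fails, the error path undoes the shutdown by
	 * restarting the hardware, re-arming the timer, and re-attaching
	 * the netdev, so the interface keeps working in D0 rather than
	 * being left half suspended.
	 */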
13369 	err = tg3_set_power_state(tp, target_state);
13370 	if (err) {
13371 		int err2;
13372 
13373 		tg3_full_lock(tp, 0);
13374 
13375 		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13376 		err2 = tg3_restart_hw(tp, 1);
13377 		if (err2)
13378 			goto out;
13379 
13380 		tp->timer.expires = jiffies + tp->timer_offset;
13381 		add_timer(&tp->timer);
13382 
13383 		netif_device_attach(dev);
13384 		tg3_netif_start(tp);
13385 
13386 out:
13387 		tg3_full_unlock(tp);
13388 
13389 		if (!err2)
13390 			tg3_phy_start(tp);
13391 	}
13392 
13393 	return err;
13394 }
13395 
13396 static int tg3_resume(struct pci_dev *pdev)
13397 {
13398 	struct net_device *dev = pci_get_drvdata(pdev);
13399 	struct tg3 *tp = netdev_priv(dev);
13400 	int err;
13401 
13402 	pci_restore_state(tp->pdev);
13403 
13404 	if (!netif_running(dev))
13405 		return 0;
13406 
13407 	err = tg3_set_power_state(tp, PCI_D0);
13408 	if (err)
13409 		return err;
13410 
13411 	netif_device_attach(dev);
13412 
13413 	tg3_full_lock(tp, 0);
13414 
13415 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13416 	err = tg3_restart_hw(tp, 1);
13417 	if (err)
13418 		goto out;
13419 
13420 	tp->timer.expires = jiffies + tp->timer_offset;
13421 	add_timer(&tp->timer);
13422 
13423 	tg3_netif_start(tp);
13424 
13425 out:
13426 	tg3_full_unlock(tp);
13427 
13428 	if (!err)
13429 		tg3_phy_start(tp);
13430 
13431 	return err;
13432 }
13433 
13434 static struct pci_driver tg3_driver = {
13435 	.name		= DRV_MODULE_NAME,
13436 	.id_table	= tg3_pci_tbl,
13437 	.probe		= tg3_init_one,
13438 	.remove		= __devexit_p(tg3_remove_one),
13439 	.suspend	= tg3_suspend,
13440 	.resume		= tg3_resume
13441 };
13442 
13443 static int __init tg3_init(void)
13444 {
13445 	return pci_register_driver(&tg3_driver);
13446 }
13447 
13448 static void __exit tg3_cleanup(void)
13449 {
13450 	pci_unregister_driver(&tg3_driver);
13451 }
13452 
13453 module_init(tg3_init);
13454 module_exit(tg3_cleanup);
13455