/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
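
/* Example: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the driver's flag bitmap.
 */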

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"
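/* DRV_MODULE_VERSION therefore expands to the string "3.137". */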

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
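/* TG3_TX_RING_SIZE is a power of two, so the ring index wraps with a
 * single mask instead of a modulo: e.g. NEXT_TX(511) == 0.
 */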

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
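/* For example (hypothetical invocation), "modprobe tg3 tg3_debug=0x0006"
 * would enable only NETIF_MSG_PROBE (0x0002) and NETIF_MSG_LINK (0x0004).
 */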

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
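
/* A posted write immediately followed by a read back of the same register,
 * which forces the write to complete before the caller proceeds.
 */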
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
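
/* tp->write32, tp->read32 and the mailbox variants are function pointers;
 * they are assumed to be assigned elsewhere at probe time, based on which
 * of the chip quirks tested above (PCIX_TARGET_HWBUG, ICH_WORKAROUND,
 * MBOX_WRITE_REORDER, ...) apply to the part being driven.
 */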

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
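		/* fall through */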
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
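		/* fall through */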
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

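/* Unlike tg3_ape_event_lock(), this returns nonzero (true) when the APE
 * still had the previous event pending after timeout_us expired, and
 * zero on success.
 */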
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
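/* Worst case the MI busy-poll loops below spin 5000 times with a 10 usec
 * delay per iteration, i.e. roughly 50 ms before giving up with -EBUSY.
 */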

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

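	/* Poll in ~8 usec steps; delay_cnt was scaled down by 8 above, so
	 * the loop still covers roughly the remaining wait time.
	 */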
1650 for (i = 0; i < delay_cnt; i++) {
1651 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1652 break;
1653 if (pci_channel_offline(tp->pdev))
1654 break;
1655
1656 udelay(8);
1657 }
1658 }
1659
1660 /* tp->lock is held. */
tg3_phy_gather_ump_data(struct tg3 * tp,u32 * data)1661 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1662 {
1663 u32 reg, val;
1664
1665 val = 0;
1666 if (!tg3_readphy(tp, MII_BMCR, ®))
1667 val = reg << 16;
1668 if (!tg3_readphy(tp, MII_BMSR, ®))
1669 val |= (reg & 0xffff);
1670 *data++ = val;
1671
1672 val = 0;
1673 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1674 val = reg << 16;
1675 if (!tg3_readphy(tp, MII_LPA, ®))
1676 val |= (reg & 0xffff);
1677 *data++ = val;
1678
1679 val = 0;
1680 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1681 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1682 val = reg << 16;
1683 if (!tg3_readphy(tp, MII_STAT1000, ®))
1684 val |= (reg & 0xffff);
1685 }
1686 *data++ = val;
1687
1688 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1689 val = reg << 16;
1690 else
1691 val = 0;
1692 *data++ = val;
1693 }
1694
1695 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1696 static void tg3_ump_link_report(struct tg3 *tp)
1697 {
1698 u32 data[4];
1699
1700 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1701 return;
1702
1703 tg3_phy_gather_ump_data(tp, data);
1704
1705 tg3_wait_for_event_ack(tp);
1706
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1710 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1711 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1712 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1713
1714 tg3_generate_fw_event(tp);
1715 }
1716
1717 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1718 static void tg3_stop_fw(struct tg3 *tp)
1719 {
1720 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1721 /* Wait for RX cpu to ACK the previous event. */
1722 tg3_wait_for_event_ack(tp);
1723
1724 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1725
1726 tg3_generate_fw_event(tp);
1727
1728 /* Wait for RX cpu to ACK this event. */
1729 tg3_wait_for_event_ack(tp);
1730 }
1731 }
1732
1733 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1734 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1735 {
1736 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1737 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1738
1739 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1740 switch (kind) {
1741 case RESET_KIND_INIT:
1742 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 DRV_STATE_START);
1744 break;
1745
1746 case RESET_KIND_SHUTDOWN:
1747 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 DRV_STATE_UNLOAD);
1749 break;
1750
1751 case RESET_KIND_SUSPEND:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_SUSPEND);
1754 break;
1755
1756 default:
1757 break;
1758 }
1759 }
1760 }
1761
1762 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1763 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1764 {
1765 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1766 switch (kind) {
1767 case RESET_KIND_INIT:
1768 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769 DRV_STATE_START_DONE);
1770 break;
1771
1772 case RESET_KIND_SHUTDOWN:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774 DRV_STATE_UNLOAD_DONE);
1775 break;
1776
1777 default:
1778 break;
1779 }
1780 }
1781 }
1782
1783 /* tp->lock is held. */
1784 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1785 {
1786 if (tg3_flag(tp, ENABLE_ASF)) {
1787 switch (kind) {
1788 case RESET_KIND_INIT:
1789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790 DRV_STATE_START);
1791 break;
1792
1793 case RESET_KIND_SHUTDOWN:
1794 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795 DRV_STATE_UNLOAD);
1796 break;
1797
1798 case RESET_KIND_SUSPEND:
1799 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1800 DRV_STATE_SUSPEND);
1801 break;
1802
1803 default:
1804 break;
1805 }
1806 }
1807 }
1808
1809 static int tg3_poll_fw(struct tg3 *tp)
1810 {
1811 int i;
1812 u32 val;
1813
1814 if (tg3_flag(tp, NO_FWARE_REPORTED))
1815 return 0;
1816
1817 if (tg3_flag(tp, IS_SSB_CORE)) {
1818 /* We don't use firmware. */
1819 return 0;
1820 }
1821
1822 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1823 /* Wait up to 20ms for init done. */
1824 for (i = 0; i < 200; i++) {
1825 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1826 return 0;
1827 if (pci_channel_offline(tp->pdev))
1828 return -ENODEV;
1829
1830 udelay(100);
1831 }
1832 return -ENODEV;
1833 }
1834
1835 /* Wait for firmware initialization to complete. */
1836 for (i = 0; i < 100000; i++) {
1837 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1838 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1839 break;
1840 if (pci_channel_offline(tp->pdev)) {
1841 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1842 tg3_flag_set(tp, NO_FWARE_REPORTED);
1843 netdev_info(tp->dev, "No firmware running\n");
1844 }
1845
1846 break;
1847 }
1848
1849 udelay(10);
1850 }
1851
1852 /* Chip might not be fitted with firmware. Some Sun onboard
1853 * parts are configured like that. So don't signal the timeout
1854 * of the above loop as an error, but do report the lack of
1855 * running firmware once.
1856 */
1857 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1858 tg3_flag_set(tp, NO_FWARE_REPORTED);
1859
1860 netdev_info(tp->dev, "No firmware running\n");
1861 }
1862
1863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1864 /* The 57765 A0 needs a little more
1865 * time to do some important work.
1866 */
1867 mdelay(10);
1868 }
1869
1870 return 0;
1871 }
1872
1873 static void tg3_link_report(struct tg3 *tp)
1874 {
1875 if (!netif_carrier_ok(tp->dev)) {
1876 netif_info(tp, link, tp->dev, "Link is down\n");
1877 tg3_ump_link_report(tp);
1878 } else if (netif_msg_link(tp)) {
1879 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1880 (tp->link_config.active_speed == SPEED_1000 ?
1881 1000 :
1882 (tp->link_config.active_speed == SPEED_100 ?
1883 100 : 10)),
1884 (tp->link_config.active_duplex == DUPLEX_FULL ?
1885 "full" : "half"));
1886
1887 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1888 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1889 "on" : "off",
1890 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1891 "on" : "off");
1892
1893 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1894 netdev_info(tp->dev, "EEE is %s\n",
1895 tp->setlpicnt ? "enabled" : "disabled");
1896
1897 tg3_ump_link_report(tp);
1898 }
1899
1900 tp->link_up = netif_carrier_ok(tp->dev);
1901 }
1902
1903 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1904 {
1905 u32 flowctrl = 0;
1906
1907 if (adv & ADVERTISE_PAUSE_CAP) {
1908 flowctrl |= FLOW_CTRL_RX;
1909 if (!(adv & ADVERTISE_PAUSE_ASYM))
1910 flowctrl |= FLOW_CTRL_TX;
1911 } else if (adv & ADVERTISE_PAUSE_ASYM)
1912 flowctrl |= FLOW_CTRL_TX;
1913
1914 return flowctrl;
1915 }
1916
1917 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1918 {
1919 u16 miireg;
1920
1921 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1922 miireg = ADVERTISE_1000XPAUSE;
1923 else if (flow_ctrl & FLOW_CTRL_TX)
1924 miireg = ADVERTISE_1000XPSE_ASYM;
1925 else if (flow_ctrl & FLOW_CTRL_RX)
1926 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1927 else
1928 miireg = 0;
1929
1930 return miireg;
1931 }
1932
1933 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1934 {
1935 u32 flowctrl = 0;
1936
1937 if (adv & ADVERTISE_1000XPAUSE) {
1938 flowctrl |= FLOW_CTRL_RX;
1939 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1940 flowctrl |= FLOW_CTRL_TX;
1941 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1942 flowctrl |= FLOW_CTRL_TX;
1943
1944 return flowctrl;
1945 }
1946
1947 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1948 {
1949 u8 cap = 0;
1950
1951 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1952 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1953 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1954 if (lcladv & ADVERTISE_1000XPAUSE)
1955 cap = FLOW_CTRL_RX;
1956 if (rmtadv & ADVERTISE_1000XPAUSE)
1957 cap = FLOW_CTRL_TX;
1958 }
1959
1960 return cap;
1961 }
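/* Illustrative sanity checks (editor's addition, not driver code) for the
 * resolution logic above, following IEEE 802.3 Annex 28B: symmetric pause
 * on both sides yields full flow control; with asymmetric pause only one
 * direction is enabled, chosen by which side also advertises symmetric
 * pause.
 */
static void __maybe_unused example_flowctrl_1000X_cases(void)
{
	/* Both sides advertise symmetric pause -> TX and RX. */
	WARN_ON(tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPAUSE,
					   ADVERTISE_1000XPAUSE) !=
		(FLOW_CTRL_TX | FLOW_CTRL_RX));

	/* Local sym+asym vs. remote asym-only -> RX only. */
	WARN_ON(tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPAUSE |
					   ADVERTISE_1000XPSE_ASYM,
					   ADVERTISE_1000XPSE_ASYM) !=
		FLOW_CTRL_RX);
}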
1962
1963 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1964 {
1965 u8 autoneg;
1966 u8 flowctrl = 0;
1967 u32 old_rx_mode = tp->rx_mode;
1968 u32 old_tx_mode = tp->tx_mode;
1969
1970 if (tg3_flag(tp, USE_PHYLIB))
1971 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1972 else
1973 autoneg = tp->link_config.autoneg;
1974
1975 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1976 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1977 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1978 else
1979 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1980 } else
1981 flowctrl = tp->link_config.flowctrl;
1982
1983 tp->link_config.active_flowctrl = flowctrl;
1984
1985 if (flowctrl & FLOW_CTRL_RX)
1986 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1987 else
1988 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1989
1990 if (old_rx_mode != tp->rx_mode)
1991 tw32_f(MAC_RX_MODE, tp->rx_mode);
1992
1993 if (flowctrl & FLOW_CTRL_TX)
1994 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1995 else
1996 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1997
1998 if (old_tx_mode != tp->tx_mode)
1999 tw32_f(MAC_TX_MODE, tp->tx_mode);
2000 }
2001
2002 static void tg3_adjust_link(struct net_device *dev)
2003 {
2004 u8 oldflowctrl, linkmesg = 0;
2005 u32 mac_mode, lcl_adv, rmt_adv;
2006 struct tg3 *tp = netdev_priv(dev);
2007 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2008
2009 spin_lock_bh(&tp->lock);
2010
2011 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2012 MAC_MODE_HALF_DUPLEX);
2013
2014 oldflowctrl = tp->link_config.active_flowctrl;
2015
2016 if (phydev->link) {
2017 lcl_adv = 0;
2018 rmt_adv = 0;
2019
2020 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2022 else if (phydev->speed == SPEED_1000 ||
2023 tg3_asic_rev(tp) != ASIC_REV_5785)
2024 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2025 else
2026 mac_mode |= MAC_MODE_PORT_MODE_MII;
2027
2028 if (phydev->duplex == DUPLEX_HALF)
2029 mac_mode |= MAC_MODE_HALF_DUPLEX;
2030 else {
2031 lcl_adv = mii_advertise_flowctrl(
2032 tp->link_config.flowctrl);
2033
2034 if (phydev->pause)
2035 rmt_adv = LPA_PAUSE_CAP;
2036 if (phydev->asym_pause)
2037 rmt_adv |= LPA_PAUSE_ASYM;
2038 }
2039
2040 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2041 } else
2042 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2043
2044 if (mac_mode != tp->mac_mode) {
2045 tp->mac_mode = mac_mode;
2046 tw32_f(MAC_MODE, tp->mac_mode);
2047 udelay(40);
2048 }
2049
2050 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2051 if (phydev->speed == SPEED_10)
2052 tw32(MAC_MI_STAT,
2053 MAC_MI_STAT_10MBPS_MODE |
2054 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 else
2056 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057 }
2058
2059 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064 else
2065 tw32(MAC_TX_LENGTHS,
2066 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2067 (6 << TX_LENGTHS_IPG_SHIFT) |
2068 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2069
2070 if (phydev->link != tp->old_link ||
2071 phydev->speed != tp->link_config.active_speed ||
2072 phydev->duplex != tp->link_config.active_duplex ||
2073 oldflowctrl != tp->link_config.active_flowctrl)
2074 linkmesg = 1;
2075
2076 tp->old_link = phydev->link;
2077 tp->link_config.active_speed = phydev->speed;
2078 tp->link_config.active_duplex = phydev->duplex;
2079
2080 spin_unlock_bh(&tp->lock);
2081
2082 if (linkmesg)
2083 tg3_link_report(tp);
2084 }
2085
2086 static int tg3_phy_init(struct tg3 *tp)
2087 {
2088 struct phy_device *phydev;
2089
2090 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2091 return 0;
2092
2093 /* Bring the PHY back to a known state. */
2094 tg3_bmcr_reset(tp);
2095
2096 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2097
2098 /* Attach the MAC to the PHY. */
2099 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2100 tg3_adjust_link, phydev->interface);
2101 if (IS_ERR(phydev)) {
2102 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2103 return PTR_ERR(phydev);
2104 }
2105
2106 /* Mask with MAC supported features. */
2107 switch (phydev->interface) {
2108 case PHY_INTERFACE_MODE_GMII:
2109 case PHY_INTERFACE_MODE_RGMII:
2110 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2111 phydev->supported &= (PHY_GBIT_FEATURES |
2112 SUPPORTED_Pause |
2113 SUPPORTED_Asym_Pause);
2114 break;
2115 }
2116 /* fallthru */
2117 case PHY_INTERFACE_MODE_MII:
2118 phydev->supported &= (PHY_BASIC_FEATURES |
2119 SUPPORTED_Pause |
2120 SUPPORTED_Asym_Pause);
2121 break;
2122 default:
2123 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2124 return -EINVAL;
2125 }
2126
2127 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128
2129 phydev->advertising = phydev->supported;
2130
2131 return 0;
2132 }
2133
2134 static void tg3_phy_start(struct tg3 *tp)
2135 {
2136 struct phy_device *phydev;
2137
2138 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139 return;
2140
2141 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2142
2143 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145 phydev->speed = tp->link_config.speed;
2146 phydev->duplex = tp->link_config.duplex;
2147 phydev->autoneg = tp->link_config.autoneg;
2148 phydev->advertising = tp->link_config.advertising;
2149 }
2150
2151 phy_start(phydev);
2152
2153 phy_start_aneg(phydev);
2154 }
2155
2156 static void tg3_phy_stop(struct tg3 *tp)
2157 {
2158 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2159 return;
2160
2161 phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2162 }
2163
2164 static void tg3_phy_fini(struct tg3 *tp)
2165 {
2166 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2167 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2168 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2169 }
2170 }
2171
2172 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2173 {
2174 int err;
2175 u32 val;
2176
2177 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2178 return 0;
2179
2180 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2181 /* Cannot do read-modify-write on 5401 */
2182 err = tg3_phy_auxctl_write(tp,
2183 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2184 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2185 0x4c20);
2186 goto done;
2187 }
2188
2189 err = tg3_phy_auxctl_read(tp,
2190 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2191 if (err)
2192 return err;
2193
2194 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2195 err = tg3_phy_auxctl_write(tp,
2196 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197
2198 done:
2199 return err;
2200 }
2201
2202 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2203 {
2204 u32 phytest;
2205
2206 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2207 u32 phy;
2208
2209 tg3_writephy(tp, MII_TG3_FET_TEST,
2210 phytest | MII_TG3_FET_SHADOW_EN);
2211 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2212 if (enable)
2213 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214 else
2215 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2216 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2217 }
2218 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2219 }
2220 }
2221
2222 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2223 {
2224 u32 reg;
2225
2226 if (!tg3_flag(tp, 5705_PLUS) ||
2227 (tg3_flag(tp, 5717_PLUS) &&
2228 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2229 return;
2230
2231 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2232 tg3_phy_fet_toggle_apd(tp, enable);
2233 return;
2234 }
2235
2236 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2237 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2238 MII_TG3_MISC_SHDW_SCR5_SDTL |
2239 MII_TG3_MISC_SHDW_SCR5_C125OE;
2240 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2241 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2242
2243 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2244
2245
2246 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2247 if (enable)
2248 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2249
2250 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2251 }
2252
2253 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2254 {
2255 u32 phy;
2256
2257 if (!tg3_flag(tp, 5705_PLUS) ||
2258 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2259 return;
2260
2261 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2262 u32 ephy;
2263
2264 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2265 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2266
2267 tg3_writephy(tp, MII_TG3_FET_TEST,
2268 ephy | MII_TG3_FET_SHADOW_EN);
2269 if (!tg3_readphy(tp, reg, &phy)) {
2270 if (enable)
2271 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272 else
2273 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2274 tg3_writephy(tp, reg, phy);
2275 }
2276 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2277 }
2278 } else {
2279 int ret;
2280
2281 ret = tg3_phy_auxctl_read(tp,
2282 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2283 if (!ret) {
2284 if (enable)
2285 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286 else
2287 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2288 tg3_phy_auxctl_write(tp,
2289 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2290 }
2291 }
2292 }
2293
2294 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295 {
2296 int ret;
2297 u32 val;
2298
2299 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2300 return;
2301
2302 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2303 if (!ret)
2304 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2305 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2306 }
2307
2308 static void tg3_phy_apply_otp(struct tg3 *tp)
2309 {
2310 u32 otp, phy;
2311
2312 if (!tp->phy_otp)
2313 return;
2314
2315 otp = tp->phy_otp;
2316
2317 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2318 return;
2319
2320 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2321 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2322 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2323
2324 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2325 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2327
2328 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2329 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2330 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2331
2332 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2333 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2334
2335 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2337
2338 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2339 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2340 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2341
2342 tg3_phy_toggle_auxctl_smdsp(tp, false);
2343 }
2344
2345 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2346 {
2347 u32 val;
2348 struct ethtool_eee *dest = &tp->eee;
2349
2350 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351 return;
2352
2353 if (eee)
2354 dest = eee;
2355
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2357 return;
2358
2359 /* Pull eee_active */
2360 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2361 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2362 dest->eee_active = 1;
2363 } else
2364 dest->eee_active = 0;
2365
2366 /* Pull lp advertised settings */
2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2368 return;
2369 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2370
2371 /* Pull advertised and eee_enabled settings */
2372 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2373 return;
2374 dest->eee_enabled = !!val;
2375 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2376
2377 /* Pull tx_lpi_enabled */
2378 val = tr32(TG3_CPMU_EEE_MODE);
2379 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2380
2381 /* Pull lpi timer value */
2382 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2383 }
2384
2385 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2386 {
2387 u32 val;
2388
2389 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390 return;
2391
2392 tp->setlpicnt = 0;
2393
2394 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2395 current_link_up &&
2396 tp->link_config.active_duplex == DUPLEX_FULL &&
2397 (tp->link_config.active_speed == SPEED_100 ||
2398 tp->link_config.active_speed == SPEED_1000)) {
2399 u32 eeectl;
2400
2401 if (tp->link_config.active_speed == SPEED_1000)
2402 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2403 else
2404 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2405
2406 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2407
2408 tg3_eee_pull_config(tp, NULL);
2409 if (tp->eee.eee_active)
2410 tp->setlpicnt = 2;
2411 }
2412
2413 if (!tp->setlpicnt) {
2414 if (current_link_up &&
2415 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2416 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2417 tg3_phy_toggle_auxctl_smdsp(tp, false);
2418 }
2419
2420 val = tr32(TG3_CPMU_EEE_MODE);
2421 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2422 }
2423 }
2424
2425 static void tg3_phy_eee_enable(struct tg3 *tp)
2426 {
2427 u32 val;
2428
2429 if (tp->link_config.active_speed == SPEED_1000 &&
2430 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2431 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2432 tg3_flag(tp, 57765_CLASS)) &&
2433 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2434 val = MII_TG3_DSP_TAP26_ALNOKO |
2435 MII_TG3_DSP_TAP26_RMRXSTO;
2436 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2437 tg3_phy_toggle_auxctl_smdsp(tp, false);
2438 }
2439
2440 val = tr32(TG3_CPMU_EEE_MODE);
2441 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2442 }
2443
2444 static int tg3_wait_macro_done(struct tg3 *tp)
2445 {
2446 int limit = 100;
2447
2448 while (limit--) {
2449 u32 tmp32;
2450
2451 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2452 if ((tmp32 & 0x1000) == 0)
2453 break;
2454 }
2455 }
2456 if (limit < 0)
2457 return -EBUSY;
2458
2459 return 0;
2460 }
2461
2462 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2463 {
2464 static const u32 test_pat[4][6] = {
2465 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2466 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2467 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2468 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2469 };
2470 int chan;
2471
2472 for (chan = 0; chan < 4; chan++) {
2473 int i;
2474
2475 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2476 (chan * 0x2000) | 0x0200);
2477 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2478
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2481 test_pat[chan][i]);
2482
2483 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2484 if (tg3_wait_macro_done(tp)) {
2485 *resetp = 1;
2486 return -EBUSY;
2487 }
2488
2489 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2490 (chan * 0x2000) | 0x0200);
2491 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2492 if (tg3_wait_macro_done(tp)) {
2493 *resetp = 1;
2494 return -EBUSY;
2495 }
2496
2497 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2498 if (tg3_wait_macro_done(tp)) {
2499 *resetp = 1;
2500 return -EBUSY;
2501 }
2502
2503 for (i = 0; i < 6; i += 2) {
2504 u32 low, high;
2505
2506 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2507 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2508 tg3_wait_macro_done(tp)) {
2509 *resetp = 1;
2510 return -EBUSY;
2511 }
2512 low &= 0x7fff;
2513 high &= 0x000f;
2514 if (low != test_pat[chan][i] ||
2515 high != test_pat[chan][i+1]) {
2516 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2517 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2518 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2519
2520 return -EBUSY;
2521 }
2522 }
2523 }
2524
2525 return 0;
2526 }
2527
2528 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2529 {
2530 int chan;
2531
2532 for (chan = 0; chan < 4; chan++) {
2533 int i;
2534
2535 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2536 (chan * 0x2000) | 0x0200);
2537 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2538 for (i = 0; i < 6; i++)
2539 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2540 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2541 if (tg3_wait_macro_done(tp))
2542 return -EBUSY;
2543 }
2544
2545 return 0;
2546 }
2547
2548 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2549 {
2550 u32 reg32, phy9_orig;
2551 int retries, do_phy_reset, err;
2552
2553 retries = 10;
2554 do_phy_reset = 1;
2555 do {
2556 if (do_phy_reset) {
2557 err = tg3_bmcr_reset(tp);
2558 if (err)
2559 return err;
2560 do_phy_reset = 0;
2561 }
2562
2563 /* Disable transmitter and interrupt. */
2564 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2565 continue;
2566
2567 reg32 |= 0x3000;
2568 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2569
2570 /* Set full-duplex, 1000 Mbps. */
2571 tg3_writephy(tp, MII_BMCR,
2572 BMCR_FULLDPLX | BMCR_SPEED1000);
2573
2574 /* Set to master mode. */
2575 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2576 continue;
2577
2578 tg3_writephy(tp, MII_CTRL1000,
2579 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2580
2581 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2582 if (err)
2583 return err;
2584
2585 /* Block the PHY control access. */
2586 tg3_phydsp_write(tp, 0x8005, 0x0800);
2587
2588 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2589 if (!err)
2590 break;
2591 } while (--retries);
2592
2593 err = tg3_phy_reset_chanpat(tp);
2594 if (err)
2595 return err;
2596
2597 tg3_phydsp_write(tp, 0x8005, 0x0000);
2598
2599 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2600 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2601
2602 tg3_phy_toggle_auxctl_smdsp(tp, false);
2603
2604 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2605
2606 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607 if (err)
2608 return err;
2609
2610 reg32 &= ~0x3000;
2611 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612
2613 return 0;
2614 }
2615
2616 static void tg3_carrier_off(struct tg3 *tp)
2617 {
2618 netif_carrier_off(tp->dev);
2619 tp->link_up = false;
2620 }
2621
2622 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2623 {
2624 if (tg3_flag(tp, ENABLE_ASF))
2625 netdev_warn(tp->dev,
2626 "Management side-band traffic will be interrupted during phy settings change\n");
2627 }
2628
2629 /* Reset the tigon3 PHY and apply the chip-specific
2630 * workarounds that the reset requires.
2631 */
2632 static int tg3_phy_reset(struct tg3 *tp)
2633 {
2634 u32 val, cpmuctrl;
2635 int err;
2636
2637 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2638 val = tr32(GRC_MISC_CFG);
2639 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2640 udelay(40);
2641 }
2642 err = tg3_readphy(tp, MII_BMSR, &val);
2643 err |= tg3_readphy(tp, MII_BMSR, &val);
2644 if (err != 0)
2645 return -EBUSY;
2646
2647 if (netif_running(tp->dev) && tp->link_up) {
2648 netif_carrier_off(tp->dev);
2649 tg3_link_report(tp);
2650 }
2651
2652 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2653 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2654 tg3_asic_rev(tp) == ASIC_REV_5705) {
2655 err = tg3_phy_reset_5703_4_5(tp);
2656 if (err)
2657 return err;
2658 goto out;
2659 }
2660
2661 cpmuctrl = 0;
2662 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2663 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2664 cpmuctrl = tr32(TG3_CPMU_CTRL);
2665 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2666 tw32(TG3_CPMU_CTRL,
2667 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2668 }
2669
2670 err = tg3_bmcr_reset(tp);
2671 if (err)
2672 return err;
2673
2674 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2675 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2676 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2677
2678 tw32(TG3_CPMU_CTRL, cpmuctrl);
2679 }
2680
2681 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2682 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2683 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2684 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2685 CPMU_LSPD_1000MB_MACCLK_12_5) {
2686 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2687 udelay(40);
2688 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2689 }
2690 }
2691
2692 if (tg3_flag(tp, 5717_PLUS) &&
2693 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2694 return 0;
2695
2696 tg3_phy_apply_otp(tp);
2697
2698 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2699 tg3_phy_toggle_apd(tp, true);
2700 else
2701 tg3_phy_toggle_apd(tp, false);
2702
2703 out:
2704 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2705 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2707 tg3_phydsp_write(tp, 0x000a, 0x0323);
2708 tg3_phy_toggle_auxctl_smdsp(tp, false);
2709 }
2710
2711 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2712 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 }
2715
2716 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2717 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718 tg3_phydsp_write(tp, 0x000a, 0x310b);
2719 tg3_phydsp_write(tp, 0x201f, 0x9506);
2720 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 }
2723 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2724 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2726 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2728 tg3_writephy(tp, MII_TG3_TEST1,
2729 MII_TG3_TEST1_TRIM_EN | 0x4);
2730 } else
2731 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2732
2733 tg3_phy_toggle_auxctl_smdsp(tp, false);
2734 }
2735 }
2736
2737 /* Set Extended packet length bit (bit 14) on all chips that
2738 * support jumbo frames. */
2739 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2740 /* Cannot do read-modify-write on 5401 */
2741 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2742 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 /* Set bit 14 with read-modify-write to preserve other bits */
2744 err = tg3_phy_auxctl_read(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2746 if (!err)
2747 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2748 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2749 }
2750
2751 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2752 * jumbo frames transmission.
2753 */
2754 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2756 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2757 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2758 }
2759
2760 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2761 /* adjust output voltage */
2762 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2763 }
2764
2765 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2766 tg3_phydsp_write(tp, 0xffb, 0x4000);
2767
2768 tg3_phy_toggle_automdix(tp, true);
2769 tg3_phy_set_wirespeed(tp);
2770 return 0;
2771 }
2772
2773 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2774 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2775 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2776 TG3_GPIO_MSG_NEED_VAUX)
2777 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2778 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2779 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2780 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2781 (TG3_GPIO_MSG_DRVR_PRES << 12))
2782
2783 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2784 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2785 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2786 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2787 (TG3_GPIO_MSG_NEED_VAUX << 12))
2788
2789 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2790 {
2791 u32 status, shift;
2792
2793 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2794 tg3_asic_rev(tp) == ASIC_REV_5719)
2795 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2796 else
2797 status = tr32(TG3_CPMU_DRV_STATUS);
2798
2799 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2800 status &= ~(TG3_GPIO_MSG_MASK << shift);
2801 status |= (newstat << shift);
2802
2803 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5719)
2805 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2806 else
2807 tw32(TG3_CPMU_DRV_STATUS, status);
2808
2809 return status >> TG3_APE_GPIO_MSG_SHIFT;
2810 }
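/* Illustrative sketch (editor's addition, not driver code): each PCI
 * function owns a 4-bit slot in the shared status word, so function N's
 * TG3_GPIO_MSG_* bits sit at bit offset 4 * N once the
 * TG3_APE_GPIO_MSG_SHIFT base has been stripped, which is what the
 * TG3_GPIO_MSG_ALL_*_MASK macros above enumerate.
 */
static inline u32 example_gpio_msg_of_fn(u32 all_status, int pci_fn)
{
	return (all_status >> (4 * pci_fn)) & TG3_GPIO_MSG_MASK;
}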
2811
2812 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2813 {
2814 if (!tg3_flag(tp, IS_NIC))
2815 return 0;
2816
2817 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2818 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2819 tg3_asic_rev(tp) == ASIC_REV_5720) {
2820 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2821 return -EIO;
2822
2823 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2824
2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2827
2828 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2829 } else {
2830 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2831 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 }
2833
2834 return 0;
2835 }
2836
2837 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2838 {
2839 u32 grc_local_ctrl;
2840
2841 if (!tg3_flag(tp, IS_NIC) ||
2842 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2843 tg3_asic_rev(tp) == ASIC_REV_5701)
2844 return;
2845
2846 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855
2856 tw32_wait_f(GRC_LOCAL_CTRL,
2857 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858 TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 }
2860
2861 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2862 {
2863 if (!tg3_flag(tp, IS_NIC))
2864 return;
2865
2866 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2867 tg3_asic_rev(tp) == ASIC_REV_5701) {
2868 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2869 (GRC_LCLCTRL_GPIO_OE0 |
2870 GRC_LCLCTRL_GPIO_OE1 |
2871 GRC_LCLCTRL_GPIO_OE2 |
2872 GRC_LCLCTRL_GPIO_OUTPUT0 |
2873 GRC_LCLCTRL_GPIO_OUTPUT1),
2874 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2876 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2877 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2878 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2879 GRC_LCLCTRL_GPIO_OE1 |
2880 GRC_LCLCTRL_GPIO_OE2 |
2881 GRC_LCLCTRL_GPIO_OUTPUT0 |
2882 GRC_LCLCTRL_GPIO_OUTPUT1 |
2883 tp->grc_local_ctrl;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2892 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 } else {
2895 u32 no_gpio2;
2896 u32 grc_local_ctrl = 0;
2897
2898 /* Workaround to prevent overdrawing Amps. */
2899 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2900 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2901 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2902 grc_local_ctrl,
2903 TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 }
2905
2906 /* On 5753 and variants, GPIO2 cannot be used. */
2907 no_gpio2 = tp->nic_sram_data_cfg &
2908 NIC_SRAM_DATA_CFG_NO_GPIO2;
2909
2910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2911 GRC_LCLCTRL_GPIO_OE1 |
2912 GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT1 |
2914 GRC_LCLCTRL_GPIO_OUTPUT2;
2915 if (no_gpio2) {
2916 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2917 GRC_LCLCTRL_GPIO_OUTPUT2);
2918 }
2919 tw32_wait_f(GRC_LOCAL_CTRL,
2920 tp->grc_local_ctrl | grc_local_ctrl,
2921 TG3_GRC_LCLCTL_PWRSW_DELAY);
2922
2923 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2924
2925 tw32_wait_f(GRC_LOCAL_CTRL,
2926 tp->grc_local_ctrl | grc_local_ctrl,
2927 TG3_GRC_LCLCTL_PWRSW_DELAY);
2928
2929 if (!no_gpio2) {
2930 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2931 tw32_wait_f(GRC_LOCAL_CTRL,
2932 tp->grc_local_ctrl | grc_local_ctrl,
2933 TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 }
2935 }
2936 }
2937
2938 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2939 {
2940 u32 msg = 0;
2941
2942 /* Serialize power state transitions */
2943 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2944 return;
2945
2946 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2947 msg = TG3_GPIO_MSG_NEED_VAUX;
2948
2949 msg = tg3_set_function_status(tp, msg);
2950
2951 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2952 goto done;
2953
2954 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2955 tg3_pwrsrc_switch_to_vaux(tp);
2956 else
2957 tg3_pwrsrc_die_with_vmain(tp);
2958
2959 done:
2960 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2961 }
2962
2963 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2964 {
2965 bool need_vaux = false;
2966
2967 /* The GPIOs do something completely different on 57765. */
2968 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2969 return;
2970
2971 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2972 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2973 tg3_asic_rev(tp) == ASIC_REV_5720) {
2974 tg3_frob_aux_power_5717(tp, include_wol ?
2975 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2976 return;
2977 }
2978
2979 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2980 struct net_device *dev_peer;
2981
2982 dev_peer = pci_get_drvdata(tp->pdev_peer);
2983
2984 /* remove_one() may have been run on the peer. */
2985 if (dev_peer) {
2986 struct tg3 *tp_peer = netdev_priv(dev_peer);
2987
2988 if (tg3_flag(tp_peer, INIT_COMPLETE))
2989 return;
2990
2991 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2992 tg3_flag(tp_peer, ENABLE_ASF))
2993 need_vaux = true;
2994 }
2995 }
2996
2997 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2998 tg3_flag(tp, ENABLE_ASF))
2999 need_vaux = true;
3000
3001 if (need_vaux)
3002 tg3_pwrsrc_switch_to_vaux(tp);
3003 else
3004 tg3_pwrsrc_die_with_vmain(tp);
3005 }
3006
3007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3008 {
3009 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3010 return 1;
3011 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3012 if (speed != SPEED_10)
3013 return 1;
3014 } else if (speed == SPEED_10)
3015 return 1;
3016
3017 return 0;
3018 }
3019
3020 static bool tg3_phy_power_bug(struct tg3 *tp)
3021 {
3022 switch (tg3_asic_rev(tp)) {
3023 case ASIC_REV_5700:
3024 case ASIC_REV_5704:
3025 return true;
3026 case ASIC_REV_5780:
3027 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3028 return true;
3029 return false;
3030 case ASIC_REV_5717:
3031 if (!tp->pci_fn)
3032 return true;
3033 return false;
3034 case ASIC_REV_5719:
3035 case ASIC_REV_5720:
3036 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3037 !tp->pci_fn)
3038 return true;
3039 return false;
3040 }
3041
3042 return false;
3043 }
3044
3045 static bool tg3_phy_led_bug(struct tg3 *tp)
3046 {
3047 switch (tg3_asic_rev(tp)) {
3048 case ASIC_REV_5719:
3049 case ASIC_REV_5720:
3050 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3051 !tp->pci_fn)
3052 return true;
3053 return false;
3054 }
3055
3056 return false;
3057 }
3058
3059 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3060 {
3061 u32 val;
3062
3063 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3064 return;
3065
3066 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3067 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3068 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3069 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3070
3071 sg_dig_ctrl |=
3072 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3073 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3074 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075 }
3076 return;
3077 }
3078
3079 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3080 tg3_bmcr_reset(tp);
3081 val = tr32(GRC_MISC_CFG);
3082 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3083 udelay(40);
3084 return;
3085 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3086 u32 phytest;
3087 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3088 u32 phy;
3089
3090 tg3_writephy(tp, MII_ADVERTISE, 0);
3091 tg3_writephy(tp, MII_BMCR,
3092 BMCR_ANENABLE | BMCR_ANRESTART);
3093
3094 tg3_writephy(tp, MII_TG3_FET_TEST,
3095 phytest | MII_TG3_FET_SHADOW_EN);
3096 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3097 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3098 tg3_writephy(tp,
3099 MII_TG3_FET_SHDW_AUXMODE4,
3100 phy);
3101 }
3102 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3103 }
3104 return;
3105 } else if (do_low_power) {
3106 if (!tg3_phy_led_bug(tp))
3107 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3108 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3109
3110 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3111 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3112 MII_TG3_AUXCTL_PCTL_VREG_11V;
3113 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3114 }
3115
3116 /* The PHY should not be powered down on some chips because
3117 * of bugs.
3118 */
3119 if (tg3_phy_power_bug(tp))
3120 return;
3121
3122 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3123 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3124 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3125 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3126 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3127 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3128 }
3129
3130 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3131 }
3132
3133 /* tp->lock is held. */
3134 static int tg3_nvram_lock(struct tg3 *tp)
3135 {
3136 if (tg3_flag(tp, NVRAM)) {
3137 int i;
3138
3139 if (tp->nvram_lock_cnt == 0) {
3140 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3141 for (i = 0; i < 8000; i++) {
3142 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143 break;
3144 udelay(20);
3145 }
3146 if (i == 8000) {
3147 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3148 return -ENODEV;
3149 }
3150 }
3151 tp->nvram_lock_cnt++;
3152 }
3153 return 0;
3154 }
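/* Illustrative usage sketch (editor's addition, not driver code): the
 * software arbitration lock is recursive via nvram_lock_cnt, so nested
 * lock/unlock pairs are safe as long as every successful tg3_nvram_lock()
 * is matched by a tg3_nvram_unlock(), all under tp->lock.
 */
static int __maybe_unused example_nvram_access(struct tg3 *tp)
{
	int err = tg3_nvram_lock(tp);

	if (err)
		return err;
	/* ... access NVRAM here ... */
	tg3_nvram_unlock(tp);
	return 0;
}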
3155
3156 /* tp->lock is held. */
3157 static void tg3_nvram_unlock(struct tg3 *tp)
3158 {
3159 if (tg3_flag(tp, NVRAM)) {
3160 if (tp->nvram_lock_cnt > 0)
3161 tp->nvram_lock_cnt--;
3162 if (tp->nvram_lock_cnt == 0)
3163 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 }
3165 }
3166
3167 /* tp->lock is held. */
3168 static void tg3_enable_nvram_access(struct tg3 *tp)
3169 {
3170 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3171 u32 nvaccess = tr32(NVRAM_ACCESS);
3172
3173 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3174 }
3175 }
3176
3177 /* tp->lock is held. */
3178 static void tg3_disable_nvram_access(struct tg3 *tp)
3179 {
3180 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3181 u32 nvaccess = tr32(NVRAM_ACCESS);
3182
3183 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3184 }
3185 }
3186
3187 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3188 u32 offset, u32 *val)
3189 {
3190 u32 tmp;
3191 int i;
3192
3193 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3194 return -EINVAL;
3195
3196 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3197 EEPROM_ADDR_DEVID_MASK |
3198 EEPROM_ADDR_READ);
3199 tw32(GRC_EEPROM_ADDR,
3200 tmp |
3201 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3203 EEPROM_ADDR_ADDR_MASK) |
3204 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3205
3206 for (i = 0; i < 1000; i++) {
3207 tmp = tr32(GRC_EEPROM_ADDR);
3208
3209 if (tmp & EEPROM_ADDR_COMPLETE)
3210 break;
3211 msleep(1);
3212 }
3213 if (!(tmp & EEPROM_ADDR_COMPLETE))
3214 return -EBUSY;
3215
3216 tmp = tr32(GRC_EEPROM_DATA);
3217
3218 /*
3219 * The data will always be opposite the native endian
3220 * format. Perform a blind byteswap to compensate.
3221 */
3222 *val = swab32(tmp);
3223
3224 return 0;
3225 }
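/* Illustrative sketch (editor's addition, not driver code): because
 * GRC_EEPROM_DATA always presents the opposite of native byte order, the
 * same swab32() fixup yields host order on both big- and little-endian
 * machines, which is why the "blind byteswap" above needs no #ifdef.
 */
static inline u32 example_seeprom_to_native(u32 raw)
{
	return swab32(raw);
}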
3226
3227 #define NVRAM_CMD_TIMEOUT 5000
3228
3229 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230 {
3231 int i;
3232
3233 tw32(NVRAM_CMD, nvram_cmd);
3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235 usleep_range(10, 40);
3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 udelay(10);
3238 break;
3239 }
3240 }
3241
3242 if (i == NVRAM_CMD_TIMEOUT)
3243 return -EBUSY;
3244
3245 return 0;
3246 }
3247
3248 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3249 {
3250 if (tg3_flag(tp, NVRAM) &&
3251 tg3_flag(tp, NVRAM_BUFFERED) &&
3252 tg3_flag(tp, FLASH) &&
3253 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3254 (tp->nvram_jedecnum == JEDEC_ATMEL))
3255
3256 addr = ((addr / tp->nvram_pagesize) <<
3257 ATMEL_AT45DB0X1B_PAGE_POS) +
3258 (addr % tp->nvram_pagesize);
3259
3260 return addr;
3261 }
3262
3263 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3264 {
3265 if (tg3_flag(tp, NVRAM) &&
3266 tg3_flag(tp, NVRAM_BUFFERED) &&
3267 tg3_flag(tp, FLASH) &&
3268 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3269 (tp->nvram_jedecnum == JEDEC_ATMEL))
3270
3271 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3272 tp->nvram_pagesize) +
3273 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3274
3275 return addr;
3276 }
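/* Illustrative sketch (editor's addition, not driver code): the Atmel
 * AT45DB0X1B parts do not use a power-of-two page size, so a physical
 * address is built from a (page number, byte offset) pair with the page
 * number placed at ATMEL_AT45DB0X1B_PAGE_POS.  The two translations above
 * are exact inverses, which the check below would confirm:
 */
static inline bool example_nvram_addr_roundtrips(struct tg3 *tp, u32 addr)
{
	return tg3_nvram_logical_addr(tp, tg3_nvram_phys_addr(tp, addr)) ==
	       addr;
}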
3277
3278 /* NOTE: Data read in from NVRAM is byteswapped according to
3279 * the byteswapping settings for all other register accesses.
3280 * tg3 devices are BE devices, so on a BE machine, the data
3281 * returned will be exactly as it is seen in NVRAM. On a LE
3282 * machine, the 32-bit value will be byteswapped.
3283 */
3284 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3285 {
3286 int ret;
3287
3288 if (!tg3_flag(tp, NVRAM))
3289 return tg3_nvram_read_using_eeprom(tp, offset, val);
3290
3291 offset = tg3_nvram_phys_addr(tp, offset);
3292
3293 if (offset > NVRAM_ADDR_MSK)
3294 return -EINVAL;
3295
3296 ret = tg3_nvram_lock(tp);
3297 if (ret)
3298 return ret;
3299
3300 tg3_enable_nvram_access(tp);
3301
3302 tw32(NVRAM_ADDR, offset);
3303 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3304 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3305
3306 if (ret == 0)
3307 *val = tr32(NVRAM_RDDATA);
3308
3309 tg3_disable_nvram_access(tp);
3310
3311 tg3_nvram_unlock(tp);
3312
3313 return ret;
3314 }
3315
3316 /* Ensures NVRAM data is in bytestream format. */
3317 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3318 {
3319 u32 v;
3320 int res = tg3_nvram_read(tp, offset, &v);
3321 if (!res)
3322 *val = cpu_to_be32(v);
3323 return res;
3324 }
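/* Illustrative usage sketch (editor's addition, not driver code):
 * tg3_nvram_read() returns host-order words, so the cpu_to_be32() above
 * re-creates the raw NVRAM byte stream.  A caller that wants the image as
 * bytes (offset and length dword aligned, as elsewhere in this file)
 * could do:
 */
static int __maybe_unused example_nvram_copy_bytes(struct tg3 *tp, u8 *buf,
						   u32 off, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		__be32 word;
		int err = tg3_nvram_read_be32(tp, off + i, &word);

		if (err)
			return err;
		memcpy(buf + i, &word, 4);	/* raw NVRAM byte order */
	}
	return 0;
}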
3325
3326 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3327 u32 offset, u32 len, u8 *buf)
3328 {
3329 int i, j, rc = 0;
3330 u32 val;
3331
3332 for (i = 0; i < len; i += 4) {
3333 u32 addr;
3334 __be32 data;
3335
3336 addr = offset + i;
3337
3338 memcpy(&data, buf + i, 4);
3339
3340 /*
3341 * The SEEPROM interface expects the data to always be opposite
3342 * the native endian format. We accomplish this by reversing
3343 * all the operations that would have been performed on the
3344 * data from a call to tg3_nvram_read_be32().
3345 */
3346 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3347
3348 val = tr32(GRC_EEPROM_ADDR);
3349 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3350
3351 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3352 EEPROM_ADDR_READ);
3353 tw32(GRC_EEPROM_ADDR, val |
3354 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3355 (addr & EEPROM_ADDR_ADDR_MASK) |
3356 EEPROM_ADDR_START |
3357 EEPROM_ADDR_WRITE);
3358
3359 for (j = 0; j < 1000; j++) {
3360 val = tr32(GRC_EEPROM_ADDR);
3361
3362 if (val & EEPROM_ADDR_COMPLETE)
3363 break;
3364 msleep(1);
3365 }
3366 if (!(val & EEPROM_ADDR_COMPLETE)) {
3367 rc = -EBUSY;
3368 break;
3369 }
3370 }
3371
3372 return rc;
3373 }
3374
3375 /* offset and length are dword aligned */
3376 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3377 u8 *buf)
3378 {
3379 int ret = 0;
3380 u32 pagesize = tp->nvram_pagesize;
3381 u32 pagemask = pagesize - 1;
3382 u32 nvram_cmd;
3383 u8 *tmp;
3384
3385 tmp = kmalloc(pagesize, GFP_KERNEL);
3386 if (tmp == NULL)
3387 return -ENOMEM;
3388
3389 while (len) {
3390 int j;
3391 u32 phy_addr, page_off, size;
3392
3393 phy_addr = offset & ~pagemask;
3394
3395 for (j = 0; j < pagesize; j += 4) {
3396 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3397 (__be32 *) (tmp + j));
3398 if (ret)
3399 break;
3400 }
3401 if (ret)
3402 break;
3403
3404 page_off = offset & pagemask;
3405 size = pagesize;
3406 if (len < size)
3407 size = len;
3408
3409 len -= size;
3410
3411 memcpy(tmp + page_off, buf, size);
3412
3413 offset = offset + (pagesize - page_off);
3414
3415 tg3_enable_nvram_access(tp);
3416
3417 /*
3418 * Before we can erase the flash page, we need
3419 * to issue a special "write enable" command.
3420 */
3421 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3422
3423 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424 break;
3425
3426 /* Erase the target page */
3427 tw32(NVRAM_ADDR, phy_addr);
3428
3429 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3430 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3431
3432 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 break;
3434
3435 /* Issue another write enable to start the write. */
3436 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3437
3438 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 break;
3440
3441 for (j = 0; j < pagesize; j += 4) {
3442 __be32 data;
3443
3444 data = *((__be32 *) (tmp + j));
3445
3446 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3447
3448 tw32(NVRAM_ADDR, phy_addr + j);
3449
3450 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3451 NVRAM_CMD_WR;
3452
3453 if (j == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 else if (j == (pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3457
3458 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3459 if (ret)
3460 break;
3461 }
3462 if (ret)
3463 break;
3464 }
3465
3466 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3467 tg3_nvram_exec_cmd(tp, nvram_cmd);
3468
3469 kfree(tmp);
3470
3471 return ret;
3472 }
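/* Illustrative sketch (editor's addition, not driver code) of the page
 * arithmetic above, which assumes a power-of-two page size as the
 * pagemask masking requires: a write starting mid-page reads back the
 * whole page, patches the affected bytes, then advances offset to the
 * next page boundary.
 */
static inline u32 example_next_page_start(u32 offset, u32 pagesize)
{
	u32 page_off = offset & (pagesize - 1);

	return offset + (pagesize - page_off);
}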
3473
3474 /* offset and length are dword aligned */
3475 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476 u8 *buf)
3477 {
3478 int i, ret = 0;
3479
3480 for (i = 0; i < len; i += 4, offset += 4) {
3481 u32 page_off, phy_addr, nvram_cmd;
3482 __be32 data;
3483
3484 memcpy(&data, buf + i, 4);
3485 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3486
3487 page_off = offset % tp->nvram_pagesize;
3488
3489 phy_addr = tg3_nvram_phys_addr(tp, offset);
3490
3491 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3492
3493 if (page_off == 0 || i == 0)
3494 nvram_cmd |= NVRAM_CMD_FIRST;
3495 if (page_off == (tp->nvram_pagesize - 4))
3496 nvram_cmd |= NVRAM_CMD_LAST;
3497
3498 if (i == (len - 4))
3499 nvram_cmd |= NVRAM_CMD_LAST;
3500
3501 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3502 !tg3_flag(tp, FLASH) ||
3503 !tg3_flag(tp, 57765_PLUS))
3504 tw32(NVRAM_ADDR, phy_addr);
3505
3506 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3507 !tg3_flag(tp, 5755_PLUS) &&
3508 (tp->nvram_jedecnum == JEDEC_ST) &&
3509 (nvram_cmd & NVRAM_CMD_FIRST)) {
3510 u32 cmd;
3511
3512 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3513 ret = tg3_nvram_exec_cmd(tp, cmd);
3514 if (ret)
3515 break;
3516 }
3517 if (!tg3_flag(tp, FLASH)) {
3518 /* We always do complete word writes to eeprom. */
3519 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3520 }
3521
3522 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3523 if (ret)
3524 break;
3525 }
3526 return ret;
3527 }
3528
3529 /* offset and length are dword aligned */
3530 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3531 {
3532 int ret;
3533
3534 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3535 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3536 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3537 udelay(40);
3538 }
3539
3540 if (!tg3_flag(tp, NVRAM)) {
3541 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3542 } else {
3543 u32 grc_mode;
3544
3545 ret = tg3_nvram_lock(tp);
3546 if (ret)
3547 return ret;
3548
3549 tg3_enable_nvram_access(tp);
3550 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3551 tw32(NVRAM_WRITE1, 0x406);
3552
3553 grc_mode = tr32(GRC_MODE);
3554 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3555
3556 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3557 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3558 buf);
3559 } else {
3560 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3561 buf);
3562 }
3563
3564 grc_mode = tr32(GRC_MODE);
3565 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3566
3567 tg3_disable_nvram_access(tp);
3568 tg3_nvram_unlock(tp);
3569 }
3570
3571 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3572 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3573 udelay(40);
3574 }
3575
3576 return ret;
3577 }
3578
3579 #define RX_CPU_SCRATCH_BASE 0x30000
3580 #define RX_CPU_SCRATCH_SIZE 0x04000
3581 #define TX_CPU_SCRATCH_BASE 0x34000
3582 #define TX_CPU_SCRATCH_SIZE 0x04000
3583
3584 /* tp->lock is held. */
3585 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3586 {
3587 int i;
3588 const int iters = 10000;
3589
3590 for (i = 0; i < iters; i++) {
3591 tw32(cpu_base + CPU_STATE, 0xffffffff);
3592 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3593 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3594 break;
3595 if (pci_channel_offline(tp->pdev))
3596 return -EBUSY;
3597 }
3598
3599 return (i == iters) ? -EBUSY : 0;
3600 }
3601
3602 /* tp->lock is held. */
3603 static int tg3_rxcpu_pause(struct tg3 *tp)
3604 {
3605 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3606
3607 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3609 udelay(10);
3610
3611 return rc;
3612 }
3613
3614 /* tp->lock is held. */
3615 static int tg3_txcpu_pause(struct tg3 *tp)
3616 {
3617 return tg3_pause_cpu(tp, TX_CPU_BASE);
3618 }
3619
3620 /* tp->lock is held. */
3621 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3622 {
3623 tw32(cpu_base + CPU_STATE, 0xffffffff);
3624 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3625 }
3626
3627 /* tp->lock is held. */
3628 static void tg3_rxcpu_resume(struct tg3 *tp)
3629 {
3630 tg3_resume_cpu(tp, RX_CPU_BASE);
3631 }
3632
3633 /* tp->lock is held. */
3634 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3635 {
3636 int rc;
3637
3638 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3639
3640 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3641 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3642
3643 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3644 return 0;
3645 }
3646 if (cpu_base == RX_CPU_BASE) {
3647 rc = tg3_rxcpu_pause(tp);
3648 } else {
3649 /*
3650 * There is only an Rx CPU for the 5750 derivative in the
3651 * BCM4785.
3652 */
3653 if (tg3_flag(tp, IS_SSB_CORE))
3654 return 0;
3655
3656 rc = tg3_txcpu_pause(tp);
3657 }
3658
3659 if (rc) {
3660 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3661 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3662 return -ENODEV;
3663 }
3664
3665 /* Clear firmware's nvram arbitration. */
3666 if (tg3_flag(tp, NVRAM))
3667 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3668 return 0;
3669 }
3670
3671 static int tg3_fw_data_len(struct tg3 *tp,
3672 const struct tg3_firmware_hdr *fw_hdr)
3673 {
3674 int fw_len;
3675
3676 /* Non-fragmented firmware has one firmware header followed by a
3677 * contiguous chunk of data to be written. The length field in that
3678 * header is not the length of data to be written but the complete
3679 * length of the bss. The data length is determined based on
3680 * tp->fw->size minus headers.
3681 *
3682 * Fragmented firmware has a main header followed by multiple
3683 * fragments. Each fragment is identical to non-fragmented firmware
3684 * with a firmware header followed by a contiguous chunk of data. In
3685 * the main header, the length field is unused and set to 0xffffffff.
3686 * In each fragment header the length is the entire size of that
3687 * fragment i.e. fragment data + header length. Data length is
3688 * therefore length field in the header minus TG3_FW_HDR_LEN.
3689 */
3690 if (tp->fw_len == 0xffffffff)
3691 fw_len = be32_to_cpu(fw_hdr->len);
3692 else
3693 fw_len = tp->fw->size;
3694
3695 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3696 }
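/* Illustrative sketch (editor's addition, not driver code): walking a
 * fragmented image as described above.  The main header (len ==
 * 0xffffffff) is skipped, then each fragment header's len field, which
 * covers header plus data, hops to the next fragment until the blob is
 * exhausted.
 */
static void __maybe_unused
example_walk_fw_fragments(const struct tg3_firmware_hdr *fw_hdr,
			  int total_len)
{
	total_len -= TG3_FW_HDR_LEN;	/* main header */
	fw_hdr++;			/* first fragment */

	while (total_len > 0) {
		int frag_len = be32_to_cpu(fw_hdr->len);

		/* ... write (frag_len - TG3_FW_HDR_LEN) bytes of data ... */
		total_len -= frag_len;
		fw_hdr = (const struct tg3_firmware_hdr *)
			 ((const void *)fw_hdr + frag_len);
	}
}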
3697
3698 /* tp->lock is held. */
3699 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3700 u32 cpu_scratch_base, int cpu_scratch_size,
3701 const struct tg3_firmware_hdr *fw_hdr)
3702 {
3703 int err, i;
3704 void (*write_op)(struct tg3 *, u32, u32);
3705 int total_len = tp->fw->size;
3706
3707 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3708 netdev_err(tp->dev,
3709 "%s: Trying to load TX cpu firmware which is 5705\n",
3710 __func__);
3711 return -EINVAL;
3712 }
3713
3714 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3715 write_op = tg3_write_mem;
3716 else
3717 write_op = tg3_write_indirect_reg32;
3718
3719 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3720 /* It is possible that bootcode is still loading at this point.
3721 * Get the nvram lock first before halting the cpu.
3722 */
3723 int lock_err = tg3_nvram_lock(tp);
3724 err = tg3_halt_cpu(tp, cpu_base);
3725 if (!lock_err)
3726 tg3_nvram_unlock(tp);
3727 if (err)
3728 goto out;
3729
3730 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3731 write_op(tp, cpu_scratch_base + i, 0);
3732 tw32(cpu_base + CPU_STATE, 0xffffffff);
3733 tw32(cpu_base + CPU_MODE,
3734 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3735 } else {
3736 /* Subtract additional main header for fragmented firmware and
3737 * advance to the first fragment
3738 */
3739 total_len -= TG3_FW_HDR_LEN;
3740 fw_hdr++;
3741 }
3742
3743 do {
3744 u32 *fw_data = (u32 *)(fw_hdr + 1);
3745 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3746 write_op(tp, cpu_scratch_base +
3747 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3748 (i * sizeof(u32)),
3749 be32_to_cpu(fw_data[i]));
3750
3751 total_len -= be32_to_cpu(fw_hdr->len);
3752
3753 /* Advance to next fragment */
3754 fw_hdr = (struct tg3_firmware_hdr *)
3755 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3756 } while (total_len > 0);
3757
3758 err = 0;
3759
3760 out:
3761 return err;
3762 }
3763
3764 /* tp->lock is held. */
3765 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3766 {
3767 int i;
3768 const int iters = 5;
3769
3770 tw32(cpu_base + CPU_STATE, 0xffffffff);
3771 tw32_f(cpu_base + CPU_PC, pc);
3772
3773 for (i = 0; i < iters; i++) {
3774 if (tr32(cpu_base + CPU_PC) == pc)
3775 break;
3776 tw32(cpu_base + CPU_STATE, 0xffffffff);
3777 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3778 tw32_f(cpu_base + CPU_PC, pc);
3779 udelay(1000);
3780 }
3781
3782 return (i == iters) ? -EBUSY : 0;
3783 }
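/* Note: the retry loop above is bounded at iters == 5, re-halting the
 * CPU and re-writing the PC with a 1 ms delay between attempts, so a
 * CPU that never latches the new PC costs roughly 5 ms before -EBUSY
 * is returned instead of hanging the caller.
 */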
3784
3785 /* tp->lock is held. */
3786 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3787 {
3788 const struct tg3_firmware_hdr *fw_hdr;
3789 int err;
3790
3791 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3792
3793 /* The firmware blob starts with version numbers, followed by
3794 the start address and length. The length field holds the complete
3795 length: length = end_address_of_bss - start_address_of_text.
3796 The remainder is the blob to be loaded contiguously
3797 from the start address. */
3798
3799 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3800 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801 fw_hdr);
3802 if (err)
3803 return err;
3804
3805 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3806 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807 fw_hdr);
3808 if (err)
3809 return err;
3810
3811 /* Now startup only the RX cpu. */
3812 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3813 be32_to_cpu(fw_hdr->base_addr));
3814 if (err) {
3815 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3816 "should be %08x\n", __func__,
3817 tr32(RX_CPU_BASE + CPU_PC),
3818 be32_to_cpu(fw_hdr->base_addr));
3819 return -ENODEV;
3820 }
3821
3822 tg3_rxcpu_resume(tp);
3823
3824 return 0;
3825 }
3826
3827 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3828 {
3829 const int iters = 1000;
3830 int i;
3831 u32 val;
3832
3833 /* Wait for boot code to complete initialization and enter the
3834 * service loop. It is then safe to download service patches.
3835 */
3836 for (i = 0; i < iters; i++) {
3837 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3838 break;
3839
3840 udelay(10);
3841 }
3842
3843 if (i == iters) {
3844 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3845 return -EBUSY;
3846 }
3847
3848 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3849 if (val & 0xff) {
3850 netdev_warn(tp->dev,
3851 "Other patches exist. Not downloading EEE patch\n");
3852 return -EEXIST;
3853 }
3854
3855 return 0;
3856 }
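/* Note: the loop above polls up to 1000 times with udelay(10), giving
 * the boot code on the order of 10 ms to reach its service loop. That
 * the low byte of TG3_57766_FW_HANDSHAKE doubles as a "patch already
 * present" flag is inferred from the check above, not from separate
 * documentation.
 */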
3857
3858 /* tp->lock is held. */
3859 static void tg3_load_57766_firmware(struct tg3 *tp)
3860 {
3861 struct tg3_firmware_hdr *fw_hdr;
3862
3863 if (!tg3_flag(tp, NO_NVRAM))
3864 return;
3865
3866 if (tg3_validate_rxcpu_state(tp))
3867 return;
3868
3869 if (!tp->fw)
3870 return;
3871
3872 /* This firmware blob has a different format than older firmware
3873 * releases, as described below. The main difference is that the
3874 * data is fragmented and written to non-contiguous locations.
3875 *
3876 * The blob begins with a firmware header identical to other
3877 * firmware, consisting of version, base addr and length. The length
3878 * here is unused and set to 0xffffffff.
3879 *
3880 * This is followed by a series of firmware fragments, each
3881 * individually identical to older firmware, i.e. a firmware
3882 * header followed by the data for that fragment. The version
3883 * field of the individual fragment header is unused.
3884 */
3885
3886 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3887 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3888 return;
3889
3890 if (tg3_rxcpu_pause(tp))
3891 return;
3892
3893 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3894 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3895
3896 tg3_rxcpu_resume(tp);
3897 }
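/* Restating the download above under the stated blob layout:
 * tg3_load_firmware_cpu() is called with cpu_base == 0 and
 * cpu_scratch_size == 0 because the 57766 path skips the
 * halt-and-clear-scratch branch entirely; only the per-fragment writes
 * run, landing at TG3_57766_FW_BASE_ADDR plus each fragment's
 * (base_addr & 0xffff) offset, bracketed by the rxcpu pause/resume.
 */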
3898
3899 /* tp->lock is held. */
3900 static int tg3_load_tso_firmware(struct tg3 *tp)
3901 {
3902 const struct tg3_firmware_hdr *fw_hdr;
3903 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3904 int err;
3905
3906 if (!tg3_flag(tp, FW_TSO))
3907 return 0;
3908
3909 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3910
3911 /* The firmware blob starts with version numbers, followed by
3912 the start address and length. The length field holds the complete
3913 length: length = end_address_of_bss - start_address_of_text.
3914 The remainder is the blob to be loaded contiguously
3915 from the start address. */
3916
3917 cpu_scratch_size = tp->fw_len;
3918
3919 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3920 cpu_base = RX_CPU_BASE;
3921 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3922 } else {
3923 cpu_base = TX_CPU_BASE;
3924 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3925 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3926 }
3927
3928 err = tg3_load_firmware_cpu(tp, cpu_base,
3929 cpu_scratch_base, cpu_scratch_size,
3930 fw_hdr);
3931 if (err)
3932 return err;
3933
3934 /* Now startup the cpu. */
3935 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3936 be32_to_cpu(fw_hdr->base_addr));
3937 if (err) {
3938 netdev_err(tp->dev,
3939 "%s fails to set CPU PC, is %08x should be %08x\n",
3940 __func__, tr32(cpu_base + CPU_PC),
3941 be32_to_cpu(fw_hdr->base_addr));
3942 return -ENODEV;
3943 }
3944
3945 tg3_resume_cpu(tp, cpu_base);
3946 return 0;
3947 }
3948
3949 /* tp->lock is held. */
3950 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3951 {
3952 u32 addr_high, addr_low;
3953
3954 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3955 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3956 (mac_addr[4] << 8) | mac_addr[5]);
3957
3958 if (index < 4) {
3959 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3960 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961 } else {
3962 index -= 4;
3963 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3964 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965 }
3966 }
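/* For illustration (hypothetical address): the packing used above maps
 * the station address 00:10:18:aa:bb:cc to
 *
 *     addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *     addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *               = 0x18aabbcc
 *
 * Slots 0-3 use the MAC_ADDR_n_{HIGH,LOW} register pairs and slots
 * 4-15 the extended MAC_EXTADDR_n_{HIGH,LOW} pairs, with consecutive
 * pairs 8 bytes apart in both banks.
 */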
3967
3968 /* tp->lock is held. */
3969 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970 {
3971 u32 addr_high;
3972 int i;
3973
3974 for (i = 0; i < 4; i++) {
3975 if (i == 1 && skip_mac_1)
3976 continue;
3977 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 }
3979
3980 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3981 tg3_asic_rev(tp) == ASIC_REV_5704) {
3982 for (i = 4; i < 16; i++)
3983 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 }
3985
3986 addr_high = (tp->dev->dev_addr[0] +
3987 tp->dev->dev_addr[1] +
3988 tp->dev->dev_addr[2] +
3989 tp->dev->dev_addr[3] +
3990 tp->dev->dev_addr[4] +
3991 tp->dev->dev_addr[5]) &
3992 TX_BACKOFF_SEED_MASK;
3993 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3994 }
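/* For illustration (same hypothetical address as above): the transmit
 * backoff seed is the byte-wise sum of the station address, masked to
 * the seed field, e.g. for 00:10:18:aa:bb:cc:
 *
 *     0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259
 *
 * so stations with different addresses tend to seed different CSMA/CD
 * backoff sequences.
 */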
3995
3996 static void tg3_enable_register_access(struct tg3 *tp)
3997 {
3998 /*
3999 * Make sure register accesses (indirect or otherwise) will function
4000 * correctly.
4001 */
4002 pci_write_config_dword(tp->pdev,
4003 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004 }
4005
4006 static int tg3_power_up(struct tg3 *tp)
4007 {
4008 int err;
4009
4010 tg3_enable_register_access(tp);
4011
4012 err = pci_set_power_state(tp->pdev, PCI_D0);
4013 if (!err) {
4014 /* Switch out of Vaux if it is a NIC */
4015 tg3_pwrsrc_switch_to_vmain(tp);
4016 } else {
4017 netdev_err(tp->dev, "Transition to D0 failed\n");
4018 }
4019
4020 return err;
4021 }
4022
4023 static int tg3_setup_phy(struct tg3 *, bool);
4024
4025 static int tg3_power_down_prepare(struct tg3 *tp)
4026 {
4027 u32 misc_host_ctrl;
4028 bool device_should_wake, do_low_power;
4029
4030 tg3_enable_register_access(tp);
4031
4032 /* Restore the CLKREQ setting. */
4033 if (tg3_flag(tp, CLKREQ_BUG))
4034 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4035 PCI_EXP_LNKCTL_CLKREQ_EN);
4036
4037 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4038 tw32(TG3PCI_MISC_HOST_CTRL,
4039 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4040
4041 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4042 tg3_flag(tp, WOL_ENABLE);
4043
4044 if (tg3_flag(tp, USE_PHYLIB)) {
4045 do_low_power = false;
4046 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4047 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4048 struct phy_device *phydev;
4049 u32 phyid, advertising;
4050
4051 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4052
4053 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4054
4055 tp->link_config.speed = phydev->speed;
4056 tp->link_config.duplex = phydev->duplex;
4057 tp->link_config.autoneg = phydev->autoneg;
4058 tp->link_config.advertising = phydev->advertising;
4059
4060 advertising = ADVERTISED_TP |
4061 ADVERTISED_Pause |
4062 ADVERTISED_Autoneg |
4063 ADVERTISED_10baseT_Half;
4064
4065 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4066 if (tg3_flag(tp, WOL_SPEED_100MB))
4067 advertising |=
4068 ADVERTISED_100baseT_Half |
4069 ADVERTISED_100baseT_Full |
4070 ADVERTISED_10baseT_Full;
4071 else
4072 advertising |= ADVERTISED_10baseT_Full;
4073 }
4074
4075 phydev->advertising = advertising;
4076
4077 phy_start_aneg(phydev);
4078
4079 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4080 if (phyid != PHY_ID_BCMAC131) {
4081 phyid &= PHY_BCM_OUI_MASK;
4082 if (phyid == PHY_BCM_OUI_1 ||
4083 phyid == PHY_BCM_OUI_2 ||
4084 phyid == PHY_BCM_OUI_3)
4085 do_low_power = true;
4086 }
4087 }
4088 } else {
4089 do_low_power = true;
4090
4091 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4092 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4093
4094 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4095 tg3_setup_phy(tp, false);
4096 }
4097
4098 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4099 u32 val;
4100
4101 val = tr32(GRC_VCPU_EXT_CTRL);
4102 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4103 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4104 int i;
4105 u32 val;
4106
4107 for (i = 0; i < 200; i++) {
4108 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4109 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4110 break;
4111 msleep(1);
4112 }
4113 }
4114 if (tg3_flag(tp, WOL_CAP))
4115 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4116 WOL_DRV_STATE_SHUTDOWN |
4117 WOL_DRV_WOL |
4118 WOL_SET_MAGIC_PKT);
4119
4120 if (device_should_wake) {
4121 u32 mac_mode;
4122
4123 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4124 if (do_low_power &&
4125 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4126 tg3_phy_auxctl_write(tp,
4127 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4128 MII_TG3_AUXCTL_PCTL_WOL_EN |
4129 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4130 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4131 udelay(40);
4132 }
4133
4134 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4135 mac_mode = MAC_MODE_PORT_MODE_GMII;
4136 else if (tp->phy_flags &
4137 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4138 if (tp->link_config.active_speed == SPEED_1000)
4139 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140 else
4141 mac_mode = MAC_MODE_PORT_MODE_MII;
4142 } else
4143 mac_mode = MAC_MODE_PORT_MODE_MII;
4144
4145 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4146 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4147 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4148 SPEED_100 : SPEED_10;
4149 if (tg3_5700_link_polarity(tp, speed))
4150 mac_mode |= MAC_MODE_LINK_POLARITY;
4151 else
4152 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4153 }
4154 } else {
4155 mac_mode = MAC_MODE_PORT_MODE_TBI;
4156 }
4157
4158 if (!tg3_flag(tp, 5750_PLUS))
4159 tw32(MAC_LED_CTRL, tp->led_ctrl);
4160
4161 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4162 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4163 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4164 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4165
4166 if (tg3_flag(tp, ENABLE_APE))
4167 mac_mode |= MAC_MODE_APE_TX_EN |
4168 MAC_MODE_APE_RX_EN |
4169 MAC_MODE_TDE_ENABLE;
4170
4171 tw32_f(MAC_MODE, mac_mode);
4172 udelay(100);
4173
4174 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4175 udelay(10);
4176 }
4177
4178 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4179 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4180 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4181 u32 base_val;
4182
4183 base_val = tp->pci_clock_ctrl;
4184 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4185 CLOCK_CTRL_TXCLK_DISABLE);
4186
4187 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4188 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4189 } else if (tg3_flag(tp, 5780_CLASS) ||
4190 tg3_flag(tp, CPMU_PRESENT) ||
4191 tg3_asic_rev(tp) == ASIC_REV_5906) {
4192 /* do nothing */
4193 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4194 u32 newbits1, newbits2;
4195
4196 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197 tg3_asic_rev(tp) == ASIC_REV_5701) {
4198 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4199 CLOCK_CTRL_TXCLK_DISABLE |
4200 CLOCK_CTRL_ALTCLK);
4201 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4202 } else if (tg3_flag(tp, 5705_PLUS)) {
4203 newbits1 = CLOCK_CTRL_625_CORE;
4204 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4205 } else {
4206 newbits1 = CLOCK_CTRL_ALTCLK;
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 }
4209
4210 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4211 40);
4212
4213 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4214 40);
4215
4216 if (!tg3_flag(tp, 5705_PLUS)) {
4217 u32 newbits3;
4218
4219 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4220 tg3_asic_rev(tp) == ASIC_REV_5701) {
4221 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4222 CLOCK_CTRL_TXCLK_DISABLE |
4223 CLOCK_CTRL_44MHZ_CORE);
4224 } else {
4225 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4226 }
4227
4228 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4229 tp->pci_clock_ctrl | newbits3, 40);
4230 }
4231 }
4232
4233 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4234 tg3_power_down_phy(tp, do_low_power);
4235
4236 tg3_frob_aux_power(tp, true);
4237
4238 /* Workaround for unstable PLL clock */
4239 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4240 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4241 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4242 u32 val = tr32(0x7d00);
4243
4244 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4245 tw32(0x7d00, val);
4246 if (!tg3_flag(tp, ENABLE_ASF)) {
4247 int err;
4248
4249 err = tg3_nvram_lock(tp);
4250 tg3_halt_cpu(tp, RX_CPU_BASE);
4251 if (!err)
4252 tg3_nvram_unlock(tp);
4253 }
4254 }
4255
4256 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4257
4258 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4259
4260 return 0;
4261 }
4262
4263 static void tg3_power_down(struct tg3 *tp)
4264 {
4265 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4266 pci_set_power_state(tp->pdev, PCI_D3hot);
4267 }
4268
4269 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4270 {
4271 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4272 case MII_TG3_AUX_STAT_10HALF:
4273 *speed = SPEED_10;
4274 *duplex = DUPLEX_HALF;
4275 break;
4276
4277 case MII_TG3_AUX_STAT_10FULL:
4278 *speed = SPEED_10;
4279 *duplex = DUPLEX_FULL;
4280 break;
4281
4282 case MII_TG3_AUX_STAT_100HALF:
4283 *speed = SPEED_100;
4284 *duplex = DUPLEX_HALF;
4285 break;
4286
4287 case MII_TG3_AUX_STAT_100FULL:
4288 *speed = SPEED_100;
4289 *duplex = DUPLEX_FULL;
4290 break;
4291
4292 case MII_TG3_AUX_STAT_1000HALF:
4293 *speed = SPEED_1000;
4294 *duplex = DUPLEX_HALF;
4295 break;
4296
4297 case MII_TG3_AUX_STAT_1000FULL:
4298 *speed = SPEED_1000;
4299 *duplex = DUPLEX_FULL;
4300 break;
4301
4302 default:
4303 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4304 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4305 SPEED_10;
4306 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4307 DUPLEX_HALF;
4308 break;
4309 }
4310 *speed = SPEED_UNKNOWN;
4311 *duplex = DUPLEX_UNKNOWN;
4312 break;
4313 }
4314 }
4315
4316 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4317 {
4318 int err = 0;
4319 u32 val, new_adv;
4320
4321 new_adv = ADVERTISE_CSMA;
4322 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4323 new_adv |= mii_advertise_flowctrl(flowctrl);
4324
4325 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4326 if (err)
4327 goto done;
4328
4329 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4330 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4331
4332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4333 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4334 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4335
4336 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4337 if (err)
4338 goto done;
4339 }
4340
4341 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4342 goto done;
4343
4344 tw32(TG3_CPMU_EEE_MODE,
4345 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4346
4347 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4348 if (!err) {
4349 u32 err2;
4350
4351 val = 0;
4352 /* Advertise 100-BaseTX EEE ability */
4353 if (advertise & ADVERTISED_100baseT_Full)
4354 val |= MDIO_AN_EEE_ADV_100TX;
4355 /* Advertise 1000-BaseT EEE ability */
4356 if (advertise & ADVERTISED_1000baseT_Full)
4357 val |= MDIO_AN_EEE_ADV_1000T;
4358
4359 if (!tp->eee.eee_enabled) {
4360 val = 0;
4361 tp->eee.advertised = 0;
4362 } else {
4363 tp->eee.advertised = advertise &
4364 (ADVERTISED_100baseT_Full |
4365 ADVERTISED_1000baseT_Full);
4366 }
4367
4368 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4369 if (err)
4370 val = 0;
4371
4372 switch (tg3_asic_rev(tp)) {
4373 case ASIC_REV_5717:
4374 case ASIC_REV_57765:
4375 case ASIC_REV_57766:
4376 case ASIC_REV_5719:
4377 /* If we advertised any EEE abilities above... */
4378 if (val)
4379 val = MII_TG3_DSP_TAP26_ALNOKO |
4380 MII_TG3_DSP_TAP26_RMRXSTO |
4381 MII_TG3_DSP_TAP26_OPCSINPT;
4382 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4383 /* Fall through */
4384 case ASIC_REV_5720:
4385 case ASIC_REV_5762:
4386 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4387 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4388 MII_TG3_DSP_CH34TP2_HIBW01);
4389 }
4390
4391 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4392 if (!err)
4393 err = err2;
4394 }
4395
4396 done:
4397 return err;
4398 }
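/* For illustration (hypothetical inputs): with
 *
 *     advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full;
 *     flowctrl  = FLOW_CTRL_TX | FLOW_CTRL_RX;
 *
 * the function above programs MII_ADVERTISE with ADVERTISE_CSMA |
 * ADVERTISE_10FULL | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP.
 * mii_advertise_flowctrl() maps symmetric pause to ADVERTISE_PAUSE_CAP
 * alone, rx-only pause to ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
 * and tx-only pause to ADVERTISE_PAUSE_ASYM. Gigabit abilities are
 * programmed separately via MII_CTRL1000.
 */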
4399
4400 static void tg3_phy_copper_begin(struct tg3 *tp)
4401 {
4402 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4403 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4404 u32 adv, fc;
4405
4406 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4407 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4408 adv = ADVERTISED_10baseT_Half |
4409 ADVERTISED_10baseT_Full;
4410 if (tg3_flag(tp, WOL_SPEED_100MB))
4411 adv |= ADVERTISED_100baseT_Half |
4412 ADVERTISED_100baseT_Full;
4413 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4414 if (!(tp->phy_flags &
4415 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4416 adv |= ADVERTISED_1000baseT_Half;
4417 adv |= ADVERTISED_1000baseT_Full;
4418 }
4419
4420 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4421 } else {
4422 adv = tp->link_config.advertising;
4423 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4424 adv &= ~(ADVERTISED_1000baseT_Half |
4425 ADVERTISED_1000baseT_Full);
4426
4427 fc = tp->link_config.flowctrl;
4428 }
4429
4430 tg3_phy_autoneg_cfg(tp, adv, fc);
4431
4432 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4433 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4434 /* Normally during power down we want to autonegotiate
4435 * the lowest possible speed for WOL. However, to avoid
4436 * link flap, we leave it untouched.
4437 */
4438 return;
4439 }
4440
4441 tg3_writephy(tp, MII_BMCR,
4442 BMCR_ANENABLE | BMCR_ANRESTART);
4443 } else {
4444 int i;
4445 u32 bmcr, orig_bmcr;
4446
4447 tp->link_config.active_speed = tp->link_config.speed;
4448 tp->link_config.active_duplex = tp->link_config.duplex;
4449
4450 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4451 /* With autoneg disabled, 5715 only links up when the
4452 * advertisement register has the configured speed
4453 * enabled.
4454 */
4455 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4456 }
4457
4458 bmcr = 0;
4459 switch (tp->link_config.speed) {
4460 default:
4461 case SPEED_10:
4462 break;
4463
4464 case SPEED_100:
4465 bmcr |= BMCR_SPEED100;
4466 break;
4467
4468 case SPEED_1000:
4469 bmcr |= BMCR_SPEED1000;
4470 break;
4471 }
4472
4473 if (tp->link_config.duplex == DUPLEX_FULL)
4474 bmcr |= BMCR_FULLDPLX;
4475
4476 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4477 (bmcr != orig_bmcr)) {
4478 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4479 for (i = 0; i < 1500; i++) {
4480 u32 tmp;
4481
4482 udelay(10);
4483 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4484 tg3_readphy(tp, MII_BMSR, &tmp))
4485 continue;
4486 if (!(tmp & BMSR_LSTATUS)) {
4487 udelay(40);
4488 break;
4489 }
4490 }
4491 tg3_writephy(tp, MII_BMCR, bmcr);
4492 udelay(40);
4493 }
4494 }
4495 }
4496
4497 static int tg3_phy_pull_config(struct tg3 *tp)
4498 {
4499 int err;
4500 u32 val;
4501
4502 err = tg3_readphy(tp, MII_BMCR, &val);
4503 if (err)
4504 goto done;
4505
4506 if (!(val & BMCR_ANENABLE)) {
4507 tp->link_config.autoneg = AUTONEG_DISABLE;
4508 tp->link_config.advertising = 0;
4509 tg3_flag_clear(tp, PAUSE_AUTONEG);
4510
4511 err = -EIO;
4512
4513 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4514 case 0:
4515 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516 goto done;
4517
4518 tp->link_config.speed = SPEED_10;
4519 break;
4520 case BMCR_SPEED100:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_100;
4525 break;
4526 case BMCR_SPEED1000:
4527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4528 tp->link_config.speed = SPEED_1000;
4529 break;
4530 }
4531 /* Fall through */
4532 default:
4533 goto done;
4534 }
4535
4536 if (val & BMCR_FULLDPLX)
4537 tp->link_config.duplex = DUPLEX_FULL;
4538 else
4539 tp->link_config.duplex = DUPLEX_HALF;
4540
4541 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4542
4543 err = 0;
4544 goto done;
4545 }
4546
4547 tp->link_config.autoneg = AUTONEG_ENABLE;
4548 tp->link_config.advertising = ADVERTISED_Autoneg;
4549 tg3_flag_set(tp, PAUSE_AUTONEG);
4550
4551 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4552 u32 adv;
4553
4554 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4555 if (err)
4556 goto done;
4557
4558 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4559 tp->link_config.advertising |= adv | ADVERTISED_TP;
4560
4561 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4562 } else {
4563 tp->link_config.advertising |= ADVERTISED_FIBRE;
4564 }
4565
4566 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4567 u32 adv;
4568
4569 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4570 err = tg3_readphy(tp, MII_CTRL1000, &val);
4571 if (err)
4572 goto done;
4573
4574 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4575 } else {
4576 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = tg3_decode_flowctrl_1000X(val);
4581 tp->link_config.flowctrl = adv;
4582
4583 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4584 adv = mii_adv_to_ethtool_adv_x(val);
4585 }
4586
4587 tp->link_config.advertising |= adv;
4588 }
4589
4590 done:
4591 return err;
4592 }
4593
4594 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4595 {
4596 int err;
4597
4598 /* Turn off tap power management. */
4599 /* Set Extended packet length bit */
4600 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4601
4602 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4603 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4604 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4605 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4606 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4607
4608 udelay(40);
4609
4610 return err;
4611 }
4612
4613 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4614 {
4615 struct ethtool_eee eee;
4616
4617 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4618 return true;
4619
4620 tg3_eee_pull_config(tp, &eee);
4621
4622 if (tp->eee.eee_enabled) {
4623 if (tp->eee.advertised != eee.advertised ||
4624 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4625 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4626 return false;
4627 } else {
4628 /* EEE is disabled but we're advertising */
4629 if (eee.advertised)
4630 return false;
4631 }
4632
4633 return true;
4634 }
4635
4636 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4637 {
4638 u32 advmsk, tgtadv, advertising;
4639
4640 advertising = tp->link_config.advertising;
4641 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4642
4643 advmsk = ADVERTISE_ALL;
4644 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4645 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4646 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4647 }
4648
4649 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4650 return false;
4651
4652 if ((*lcladv & advmsk) != tgtadv)
4653 return false;
4654
4655 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4656 u32 tg3_ctrl;
4657
4658 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4659
4660 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4661 return false;
4662
4663 if (tgtadv &&
4664 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4665 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4666 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4667 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4668 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4669 } else {
4670 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4671 }
4672
4673 if (tg3_ctrl != tgtadv)
4674 return false;
4675 }
4676
4677 return true;
4678 }
4679
4680 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4681 {
4682 u32 lpeth = 0;
4683
4684 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4685 u32 val;
4686
4687 if (tg3_readphy(tp, MII_STAT1000, &val))
4688 return false;
4689
4690 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4691 }
4692
4693 if (tg3_readphy(tp, MII_LPA, rmtadv))
4694 return false;
4695
4696 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4697 tp->link_config.rmt_adv = lpeth;
4698
4699 return true;
4700 }
4701
4702 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4703 {
4704 if (curr_link_up != tp->link_up) {
4705 if (curr_link_up) {
4706 netif_carrier_on(tp->dev);
4707 } else {
4708 netif_carrier_off(tp->dev);
4709 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4710 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4711 }
4712
4713 tg3_link_report(tp);
4714 return true;
4715 }
4716
4717 return false;
4718 }
4719
4720 static void tg3_clear_mac_status(struct tg3 *tp)
4721 {
4722 tw32(MAC_EVENT, 0);
4723
4724 tw32_f(MAC_STATUS,
4725 MAC_STATUS_SYNC_CHANGED |
4726 MAC_STATUS_CFG_CHANGED |
4727 MAC_STATUS_MI_COMPLETION |
4728 MAC_STATUS_LNKSTATE_CHANGED);
4729 udelay(40);
4730 }
4731
4732 static void tg3_setup_eee(struct tg3 *tp)
4733 {
4734 u32 val;
4735
4736 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4737 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4738 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4739 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4740
4741 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4742
4743 tw32_f(TG3_CPMU_EEE_CTRL,
4744 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4745
4746 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4747 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4748 TG3_CPMU_EEEMD_LPI_IN_RX |
4749 TG3_CPMU_EEEMD_EEE_ENABLE;
4750
4751 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4752 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4753
4754 if (tg3_flag(tp, ENABLE_APE))
4755 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4756
4757 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4758
4759 tw32_f(TG3_CPMU_EEE_DBTMR1,
4760 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4761 (tp->eee.tx_lpi_timer & 0xffff));
4762
4763 tw32_f(TG3_CPMU_EEE_DBTMR2,
4764 TG3_CPMU_DBTMR2_APE_TX_2047US |
4765 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4766 }
4767
4768 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4769 {
4770 bool current_link_up;
4771 u32 bmsr, val;
4772 u32 lcl_adv, rmt_adv;
4773 u16 current_speed;
4774 u8 current_duplex;
4775 int i, err;
4776
4777 tg3_clear_mac_status(tp);
4778
4779 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4780 tw32_f(MAC_MI_MODE,
4781 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4782 udelay(80);
4783 }
4784
4785 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4786
4787 /* Some third-party PHYs need to be reset on link going
4788 * down.
4789 */
4790 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4791 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4792 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4793 tp->link_up) {
4794 tg3_readphy(tp, MII_BMSR, &bmsr);
4795 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4796 !(bmsr & BMSR_LSTATUS))
4797 force_reset = true;
4798 }
4799 if (force_reset)
4800 tg3_phy_reset(tp);
4801
4802 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4803 tg3_readphy(tp, MII_BMSR, &bmsr);
4804 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4805 !tg3_flag(tp, INIT_COMPLETE))
4806 bmsr = 0;
4807
4808 if (!(bmsr & BMSR_LSTATUS)) {
4809 err = tg3_init_5401phy_dsp(tp);
4810 if (err)
4811 return err;
4812
4813 tg3_readphy(tp, MII_BMSR, &bmsr);
4814 for (i = 0; i < 1000; i++) {
4815 udelay(10);
4816 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4817 (bmsr & BMSR_LSTATUS)) {
4818 udelay(40);
4819 break;
4820 }
4821 }
4822
4823 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4824 TG3_PHY_REV_BCM5401_B0 &&
4825 !(bmsr & BMSR_LSTATUS) &&
4826 tp->link_config.active_speed == SPEED_1000) {
4827 err = tg3_phy_reset(tp);
4828 if (!err)
4829 err = tg3_init_5401phy_dsp(tp);
4830 if (err)
4831 return err;
4832 }
4833 }
4834 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4835 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4836 /* 5701 {A0,B0} CRC bug workaround */
4837 tg3_writephy(tp, 0x15, 0x0a75);
4838 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4840 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841 }
4842
4843 /* Clear pending interrupts... */
4844 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4845 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846
4847 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4848 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4849 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4850 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4851
4852 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4853 tg3_asic_rev(tp) == ASIC_REV_5701) {
4854 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4855 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4856 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4857 else
4858 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4859 }
4860
4861 current_link_up = false;
4862 current_speed = SPEED_UNKNOWN;
4863 current_duplex = DUPLEX_UNKNOWN;
4864 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4865 tp->link_config.rmt_adv = 0;
4866
4867 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4868 err = tg3_phy_auxctl_read(tp,
4869 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 &val);
4871 if (!err && !(val & (1 << 10))) {
4872 tg3_phy_auxctl_write(tp,
4873 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874 val | (1 << 10));
4875 goto relink;
4876 }
4877 }
4878
4879 bmsr = 0;
4880 for (i = 0; i < 100; i++) {
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4883 (bmsr & BMSR_LSTATUS))
4884 break;
4885 udelay(40);
4886 }
4887
4888 if (bmsr & BMSR_LSTATUS) {
4889 u32 aux_stat, bmcr;
4890
4891 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4892 for (i = 0; i < 2000; i++) {
4893 udelay(10);
4894 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4895 aux_stat)
4896 break;
4897 }
4898
4899 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4900 &current_speed,
4901 &current_duplex);
4902
4903 bmcr = 0;
4904 for (i = 0; i < 200; i++) {
4905 tg3_readphy(tp, MII_BMCR, &bmcr);
4906 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4907 continue;
4908 if (bmcr && bmcr != 0x7fff)
4909 break;
4910 udelay(10);
4911 }
4912
4913 lcl_adv = 0;
4914 rmt_adv = 0;
4915
4916 tp->link_config.active_speed = current_speed;
4917 tp->link_config.active_duplex = current_duplex;
4918
4919 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4920 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4921
4922 if ((bmcr & BMCR_ANENABLE) &&
4923 eee_config_ok &&
4924 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4925 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4926 current_link_up = true;
4927
4928 /* EEE setting changes take effect only after a phy
4929 * reset. If we have skipped a reset due to Link Flap
4930 * Avoidance being enabled, do it now.
4931 */
4932 if (!eee_config_ok &&
4933 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4934 !force_reset) {
4935 tg3_setup_eee(tp);
4936 tg3_phy_reset(tp);
4937 }
4938 } else {
4939 if (!(bmcr & BMCR_ANENABLE) &&
4940 tp->link_config.speed == current_speed &&
4941 tp->link_config.duplex == current_duplex) {
4942 current_link_up = true;
4943 }
4944 }
4945
4946 if (current_link_up &&
4947 tp->link_config.active_duplex == DUPLEX_FULL) {
4948 u32 reg, bit;
4949
4950 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4951 reg = MII_TG3_FET_GEN_STAT;
4952 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4953 } else {
4954 reg = MII_TG3_EXT_STAT;
4955 bit = MII_TG3_EXT_STAT_MDIX;
4956 }
4957
4958 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4959 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4960
4961 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4962 }
4963 }
4964
4965 relink:
4966 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4967 tg3_phy_copper_begin(tp);
4968
4969 if (tg3_flag(tp, ROBOSWITCH)) {
4970 current_link_up = true;
4971 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4972 current_speed = SPEED_1000;
4973 current_duplex = DUPLEX_FULL;
4974 tp->link_config.active_speed = current_speed;
4975 tp->link_config.active_duplex = current_duplex;
4976 }
4977
4978 tg3_readphy(tp, MII_BMSR, &bmsr);
4979 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4980 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4981 current_link_up = true;
4982 }
4983
4984 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4985 if (current_link_up) {
4986 if (tp->link_config.active_speed == SPEED_100 ||
4987 tp->link_config.active_speed == SPEED_10)
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 else
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993 else
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995
4996 /* In order for the 5750 core in the BCM4785 chip to work properly
4997 * in RGMII mode, the LED Control Register must be set up.
4998 */
4999 if (tg3_flag(tp, RGMII_MODE)) {
5000 u32 led_ctrl = tr32(MAC_LED_CTRL);
5001 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5002
5003 if (tp->link_config.active_speed == SPEED_10)
5004 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5005 else if (tp->link_config.active_speed == SPEED_100)
5006 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5007 LED_CTRL_100MBPS_ON);
5008 else if (tp->link_config.active_speed == SPEED_1000)
5009 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010 LED_CTRL_1000MBPS_ON);
5011
5012 tw32(MAC_LED_CTRL, led_ctrl);
5013 udelay(40);
5014 }
5015
5016 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5017 if (tp->link_config.active_duplex == DUPLEX_HALF)
5018 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5019
5020 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5021 if (current_link_up &&
5022 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5023 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5024 else
5025 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5026 }
5027
5028 /* ??? Without this setting Netgear GA302T PHY does not
5029 * ??? send/receive packets...
5030 */
5031 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5032 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5033 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5034 tw32_f(MAC_MI_MODE, tp->mi_mode);
5035 udelay(80);
5036 }
5037
5038 tw32_f(MAC_MODE, tp->mac_mode);
5039 udelay(40);
5040
5041 tg3_phy_eee_adjust(tp, current_link_up);
5042
5043 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5044 /* Polled via timer. */
5045 tw32_f(MAC_EVENT, 0);
5046 } else {
5047 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5048 }
5049 udelay(40);
5050
5051 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5052 current_link_up &&
5053 tp->link_config.active_speed == SPEED_1000 &&
5054 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5055 udelay(120);
5056 tw32_f(MAC_STATUS,
5057 (MAC_STATUS_SYNC_CHANGED |
5058 MAC_STATUS_CFG_CHANGED));
5059 udelay(40);
5060 tg3_write_mem(tp,
5061 NIC_SRAM_FIRMWARE_MBOX,
5062 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5063 }
5064
5065 /* Prevent send BD corruption. */
5066 if (tg3_flag(tp, CLKREQ_BUG)) {
5067 if (tp->link_config.active_speed == SPEED_100 ||
5068 tp->link_config.active_speed == SPEED_10)
5069 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5070 PCI_EXP_LNKCTL_CLKREQ_EN);
5071 else
5072 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5073 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 }
5075
5076 tg3_test_and_report_link_chg(tp, current_link_up);
5077
5078 return 0;
5079 }
5080
5081 struct tg3_fiber_aneginfo {
5082 int state;
5083 #define ANEG_STATE_UNKNOWN 0
5084 #define ANEG_STATE_AN_ENABLE 1
5085 #define ANEG_STATE_RESTART_INIT 2
5086 #define ANEG_STATE_RESTART 3
5087 #define ANEG_STATE_DISABLE_LINK_OK 4
5088 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5089 #define ANEG_STATE_ABILITY_DETECT 6
5090 #define ANEG_STATE_ACK_DETECT_INIT 7
5091 #define ANEG_STATE_ACK_DETECT 8
5092 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5093 #define ANEG_STATE_COMPLETE_ACK 10
5094 #define ANEG_STATE_IDLE_DETECT_INIT 11
5095 #define ANEG_STATE_IDLE_DETECT 12
5096 #define ANEG_STATE_LINK_OK 13
5097 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5098 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5099
5100 u32 flags;
5101 #define MR_AN_ENABLE 0x00000001
5102 #define MR_RESTART_AN 0x00000002
5103 #define MR_AN_COMPLETE 0x00000004
5104 #define MR_PAGE_RX 0x00000008
5105 #define MR_NP_LOADED 0x00000010
5106 #define MR_TOGGLE_TX 0x00000020
5107 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5108 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5109 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5110 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5111 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5112 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5113 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5114 #define MR_TOGGLE_RX 0x00002000
5115 #define MR_NP_RX 0x00004000
5116
5117 #define MR_LINK_OK 0x80000000
5118
5119 unsigned long link_time, cur_time;
5120
5121 u32 ability_match_cfg;
5122 int ability_match_count;
5123
5124 char ability_match, idle_match, ack_match;
5125
5126 u32 txconfig, rxconfig;
5127 #define ANEG_CFG_NP 0x00000080
5128 #define ANEG_CFG_ACK 0x00000040
5129 #define ANEG_CFG_RF2 0x00000020
5130 #define ANEG_CFG_RF1 0x00000010
5131 #define ANEG_CFG_PS2 0x00000001
5132 #define ANEG_CFG_PS1 0x00008000
5133 #define ANEG_CFG_HD 0x00004000
5134 #define ANEG_CFG_FD 0x00002000
5135 #define ANEG_CFG_INVAL 0x00001f06
5136
5137 };
5138 #define ANEG_OK 0
5139 #define ANEG_DONE 1
5140 #define ANEG_TIMER_ENAB 2
5141 #define ANEG_FAILED -1
5142
5143 #define ANEG_STATE_SETTLE_TIME 10000
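/* Note: the ANEG_CFG_* values above index bits in the
 * MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG registers and appear to be the IEEE
 * 802.3 clause 37 base page with its two bytes swapped: clause 37
 * places FD/HD/PS1 at bits 5-7 and PS2/RF1/RF2/ACK/NP at bits 8 and
 * 12-15, which, once the bytes are exchanged, matches
 * ANEG_CFG_FD/HD/PS1 at 0x2000/0x4000/0x8000 and
 * ANEG_CFG_PS2/RF1/RF2/ACK/NP at 0x0001/0x0010/0x0020/0x0040/0x0080.
 * This correspondence is inferred from the constants themselves, not
 * taken from Broadcom documentation.
 */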
5144
5145 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5146 struct tg3_fiber_aneginfo *ap)
5147 {
5148 u16 flowctrl;
5149 unsigned long delta;
5150 u32 rx_cfg_reg;
5151 int ret;
5152
5153 if (ap->state == ANEG_STATE_UNKNOWN) {
5154 ap->rxconfig = 0;
5155 ap->link_time = 0;
5156 ap->cur_time = 0;
5157 ap->ability_match_cfg = 0;
5158 ap->ability_match_count = 0;
5159 ap->ability_match = 0;
5160 ap->idle_match = 0;
5161 ap->ack_match = 0;
5162 }
5163 ap->cur_time++;
5164
5165 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5166 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5167
5168 if (rx_cfg_reg != ap->ability_match_cfg) {
5169 ap->ability_match_cfg = rx_cfg_reg;
5170 ap->ability_match = 0;
5171 ap->ability_match_count = 0;
5172 } else {
5173 if (++ap->ability_match_count > 1) {
5174 ap->ability_match = 1;
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 }
5177 }
5178 if (rx_cfg_reg & ANEG_CFG_ACK)
5179 ap->ack_match = 1;
5180 else
5181 ap->ack_match = 0;
5182
5183 ap->idle_match = 0;
5184 } else {
5185 ap->idle_match = 1;
5186 ap->ability_match_cfg = 0;
5187 ap->ability_match_count = 0;
5188 ap->ability_match = 0;
5189 ap->ack_match = 0;
5190
5191 rx_cfg_reg = 0;
5192 }
5193
5194 ap->rxconfig = rx_cfg_reg;
5195 ret = ANEG_OK;
5196
5197 switch (ap->state) {
5198 case ANEG_STATE_UNKNOWN:
5199 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5200 ap->state = ANEG_STATE_AN_ENABLE;
5201
5202 /* fallthru */
5203 case ANEG_STATE_AN_ENABLE:
5204 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5205 if (ap->flags & MR_AN_ENABLE) {
5206 ap->link_time = 0;
5207 ap->cur_time = 0;
5208 ap->ability_match_cfg = 0;
5209 ap->ability_match_count = 0;
5210 ap->ability_match = 0;
5211 ap->idle_match = 0;
5212 ap->ack_match = 0;
5213
5214 ap->state = ANEG_STATE_RESTART_INIT;
5215 } else {
5216 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5217 }
5218 break;
5219
5220 case ANEG_STATE_RESTART_INIT:
5221 ap->link_time = ap->cur_time;
5222 ap->flags &= ~(MR_NP_LOADED);
5223 ap->txconfig = 0;
5224 tw32(MAC_TX_AUTO_NEG, 0);
5225 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5226 tw32_f(MAC_MODE, tp->mac_mode);
5227 udelay(40);
5228
5229 ret = ANEG_TIMER_ENAB;
5230 ap->state = ANEG_STATE_RESTART;
5231
5232 /* fallthru */
5233 case ANEG_STATE_RESTART:
5234 delta = ap->cur_time - ap->link_time;
5235 if (delta > ANEG_STATE_SETTLE_TIME)
5236 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5237 else
5238 ret = ANEG_TIMER_ENAB;
5239 break;
5240
5241 case ANEG_STATE_DISABLE_LINK_OK:
5242 ret = ANEG_DONE;
5243 break;
5244
5245 case ANEG_STATE_ABILITY_DETECT_INIT:
5246 ap->flags &= ~(MR_TOGGLE_TX);
5247 ap->txconfig = ANEG_CFG_FD;
5248 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5249 if (flowctrl & ADVERTISE_1000XPAUSE)
5250 ap->txconfig |= ANEG_CFG_PS1;
5251 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5252 ap->txconfig |= ANEG_CFG_PS2;
5253 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5254 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5255 tw32_f(MAC_MODE, tp->mac_mode);
5256 udelay(40);
5257
5258 ap->state = ANEG_STATE_ABILITY_DETECT;
5259 break;
5260
5261 case ANEG_STATE_ABILITY_DETECT:
5262 if (ap->ability_match != 0 && ap->rxconfig != 0)
5263 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5264 break;
5265
5266 case ANEG_STATE_ACK_DETECT_INIT:
5267 ap->txconfig |= ANEG_CFG_ACK;
5268 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5269 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5270 tw32_f(MAC_MODE, tp->mac_mode);
5271 udelay(40);
5272
5273 ap->state = ANEG_STATE_ACK_DETECT;
5274
5275 /* fallthru */
5276 case ANEG_STATE_ACK_DETECT:
5277 if (ap->ack_match != 0) {
5278 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5279 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5280 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5281 } else {
5282 ap->state = ANEG_STATE_AN_ENABLE;
5283 }
5284 } else if (ap->ability_match != 0 &&
5285 ap->rxconfig == 0) {
5286 ap->state = ANEG_STATE_AN_ENABLE;
5287 }
5288 break;
5289
5290 case ANEG_STATE_COMPLETE_ACK_INIT:
5291 if (ap->rxconfig & ANEG_CFG_INVAL) {
5292 ret = ANEG_FAILED;
5293 break;
5294 }
5295 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5296 MR_LP_ADV_HALF_DUPLEX |
5297 MR_LP_ADV_SYM_PAUSE |
5298 MR_LP_ADV_ASYM_PAUSE |
5299 MR_LP_ADV_REMOTE_FAULT1 |
5300 MR_LP_ADV_REMOTE_FAULT2 |
5301 MR_LP_ADV_NEXT_PAGE |
5302 MR_TOGGLE_RX |
5303 MR_NP_RX);
5304 if (ap->rxconfig & ANEG_CFG_FD)
5305 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5306 if (ap->rxconfig & ANEG_CFG_HD)
5307 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5308 if (ap->rxconfig & ANEG_CFG_PS1)
5309 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5310 if (ap->rxconfig & ANEG_CFG_PS2)
5311 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5312 if (ap->rxconfig & ANEG_CFG_RF1)
5313 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5314 if (ap->rxconfig & ANEG_CFG_RF2)
5315 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5316 if (ap->rxconfig & ANEG_CFG_NP)
5317 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5318
5319 ap->link_time = ap->cur_time;
5320
5321 ap->flags ^= (MR_TOGGLE_TX);
5322 if (ap->rxconfig & 0x0008)
5323 ap->flags |= MR_TOGGLE_RX;
5324 if (ap->rxconfig & ANEG_CFG_NP)
5325 ap->flags |= MR_NP_RX;
5326 ap->flags |= MR_PAGE_RX;
5327
5328 ap->state = ANEG_STATE_COMPLETE_ACK;
5329 ret = ANEG_TIMER_ENAB;
5330 break;
5331
5332 case ANEG_STATE_COMPLETE_ACK:
5333 if (ap->ability_match != 0 &&
5334 ap->rxconfig == 0) {
5335 ap->state = ANEG_STATE_AN_ENABLE;
5336 break;
5337 }
5338 delta = ap->cur_time - ap->link_time;
5339 if (delta > ANEG_STATE_SETTLE_TIME) {
5340 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5341 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 } else {
5343 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5344 !(ap->flags & MR_NP_RX)) {
5345 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346 } else {
5347 ret = ANEG_FAILED;
5348 }
5349 }
5350 }
5351 break;
5352
5353 case ANEG_STATE_IDLE_DETECT_INIT:
5354 ap->link_time = ap->cur_time;
5355 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5356 tw32_f(MAC_MODE, tp->mac_mode);
5357 udelay(40);
5358
5359 ap->state = ANEG_STATE_IDLE_DETECT;
5360 ret = ANEG_TIMER_ENAB;
5361 break;
5362
5363 case ANEG_STATE_IDLE_DETECT:
5364 if (ap->ability_match != 0 &&
5365 ap->rxconfig == 0) {
5366 ap->state = ANEG_STATE_AN_ENABLE;
5367 break;
5368 }
5369 delta = ap->cur_time - ap->link_time;
5370 if (delta > ANEG_STATE_SETTLE_TIME) {
5371 /* XXX another gem from the Broadcom driver :( */
5372 ap->state = ANEG_STATE_LINK_OK;
5373 }
5374 break;
5375
5376 case ANEG_STATE_LINK_OK:
5377 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5378 ret = ANEG_DONE;
5379 break;
5380
5381 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5382 /* ??? unimplemented */
5383 break;
5384
5385 case ANEG_STATE_NEXT_PAGE_WAIT:
5386 /* ??? unimplemented */
5387 break;
5388
5389 default:
5390 ret = ANEG_FAILED;
5391 break;
5392 }
5393
5394 return ret;
5395 }
5396
5397 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5398 {
5399 int res = 0;
5400 struct tg3_fiber_aneginfo aninfo;
5401 int status = ANEG_FAILED;
5402 unsigned int tick;
5403 u32 tmp;
5404
5405 tw32_f(MAC_TX_AUTO_NEG, 0);
5406
5407 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5408 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5409 udelay(40);
5410
5411 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5412 udelay(40);
5413
5414 memset(&aninfo, 0, sizeof(aninfo));
5415 aninfo.flags |= MR_AN_ENABLE;
5416 aninfo.state = ANEG_STATE_UNKNOWN;
5417 aninfo.cur_time = 0;
5418 tick = 0;
5419 while (++tick < 195000) {
5420 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5421 if (status == ANEG_DONE || status == ANEG_FAILED)
5422 break;
5423
5424 udelay(1);
5425 }
5426
5427 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5428 tw32_f(MAC_MODE, tp->mac_mode);
5429 udelay(40);
5430
5431 *txflags = aninfo.txconfig;
5432 *rxflags = aninfo.flags;
5433
5434 if (status == ANEG_DONE &&
5435 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5436 MR_LP_ADV_FULL_DUPLEX)))
5437 res = 1;
5438
5439 return res;
5440 }
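/* Note: the polling loop above steps the software autoneg state
 * machine up to 195000 times with udelay(1) between ticks, so a full
 * software negotiation is bounded at roughly 195 ms (plus loop
 * overhead) before being declared done or failed.
 */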
5441
5442 static void tg3_init_bcm8002(struct tg3 *tp)
5443 {
5444 u32 mac_status = tr32(MAC_STATUS);
5445 int i;
5446
5447 /* Reset when initializing for the first time or when we have a link. */
5448 if (tg3_flag(tp, INIT_COMPLETE) &&
5449 !(mac_status & MAC_STATUS_PCS_SYNCED))
5450 return;
5451
5452 /* Set PLL lock range. */
5453 tg3_writephy(tp, 0x16, 0x8007);
5454
5455 /* SW reset */
5456 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5457
5458 /* Wait for reset to complete. */
5459 /* XXX schedule_timeout() ... */
5460 for (i = 0; i < 500; i++)
5461 udelay(10);
5462
5463 /* Config mode; select PMA/Ch 1 regs. */
5464 tg3_writephy(tp, 0x10, 0x8411);
5465
5466 /* Enable auto-lock and comdet, select txclk for tx. */
5467 tg3_writephy(tp, 0x11, 0x0a10);
5468
5469 tg3_writephy(tp, 0x18, 0x00a0);
5470 tg3_writephy(tp, 0x16, 0x41ff);
5471
5472 /* Assert and deassert POR. */
5473 tg3_writephy(tp, 0x13, 0x0400);
5474 udelay(40);
5475 tg3_writephy(tp, 0x13, 0x0000);
5476
5477 tg3_writephy(tp, 0x11, 0x0a50);
5478 udelay(40);
5479 tg3_writephy(tp, 0x11, 0x0a10);
5480
5481 /* Wait for signal to stabilize */
5482 /* XXX schedule_timeout() ... */
5483 for (i = 0; i < 15000; i++)
5484 udelay(10);
5485
5486 /* Deselect the channel register so we can read the PHYID
5487 * later.
5488 */
5489 tg3_writephy(tp, 0x10, 0x8011);
5490 }
5491
5492 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5493 {
5494 u16 flowctrl;
5495 bool current_link_up;
5496 u32 sg_dig_ctrl, sg_dig_status;
5497 u32 serdes_cfg, expected_sg_dig_ctrl;
5498 int workaround, port_a;
5499
5500 serdes_cfg = 0;
5501 expected_sg_dig_ctrl = 0;
5502 workaround = 0;
5503 port_a = 1;
5504 current_link_up = false;
5505
5506 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5507 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5508 workaround = 1;
5509 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5510 port_a = 0;
5511
5512 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5513 /* preserve bits 20-23 for voltage regulator */
5514 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5515 }
5516
5517 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5518
5519 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5520 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5521 if (workaround) {
5522 u32 val = serdes_cfg;
5523
5524 if (port_a)
5525 val |= 0xc010000;
5526 else
5527 val |= 0x4010000;
5528 tw32_f(MAC_SERDES_CFG, val);
5529 }
5530
5531 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5532 }
5533 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5534 tg3_setup_flow_control(tp, 0, 0);
5535 current_link_up = true;
5536 }
5537 goto out;
5538 }
5539
5540 /* Want auto-negotiation. */
5541 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5542
5543 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5544 if (flowctrl & ADVERTISE_1000XPAUSE)
5545 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5546 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5547 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5548
5549 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5550 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5551 tp->serdes_counter &&
5552 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5553 MAC_STATUS_RCVD_CFG)) ==
5554 MAC_STATUS_PCS_SYNCED)) {
5555 tp->serdes_counter--;
5556 current_link_up = true;
5557 goto out;
5558 }
5559 restart_autoneg:
5560 if (workaround)
5561 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5562 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5563 udelay(5);
5564 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5565
5566 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5567 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5568 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5569 MAC_STATUS_SIGNAL_DET)) {
5570 sg_dig_status = tr32(SG_DIG_STATUS);
5571 mac_status = tr32(MAC_STATUS);
5572
5573 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5574 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5575 u32 local_adv = 0, remote_adv = 0;
5576
5577 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5578 local_adv |= ADVERTISE_1000XPAUSE;
5579 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5580 local_adv |= ADVERTISE_1000XPSE_ASYM;
5581
5582 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5583 remote_adv |= LPA_1000XPAUSE;
5584 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5585 remote_adv |= LPA_1000XPAUSE_ASYM;
5586
5587 tp->link_config.rmt_adv =
5588 mii_adv_to_ethtool_adv_x(remote_adv);
5589
5590 tg3_setup_flow_control(tp, local_adv, remote_adv);
5591 current_link_up = true;
5592 tp->serdes_counter = 0;
5593 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5594 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5595 if (tp->serdes_counter)
5596 tp->serdes_counter--;
5597 else {
5598 if (workaround) {
5599 u32 val = serdes_cfg;
5600
5601 if (port_a)
5602 val |= 0xc010000;
5603 else
5604 val |= 0x4010000;
5605
5606 tw32_f(MAC_SERDES_CFG, val);
5607 }
5608
5609 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5610 udelay(40);
5611
5612 /* Parallel link detection: the link is up only if we
5613 * have PCS_SYNC and are not receiving config code words.
5614 */
5615 mac_status = tr32(MAC_STATUS);
5616 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5617 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5618 tg3_setup_flow_control(tp, 0, 0);
5619 current_link_up = true;
5620 tp->phy_flags |=
5621 TG3_PHYFLG_PARALLEL_DETECT;
5622 tp->serdes_counter =
5623 SERDES_PARALLEL_DET_TIMEOUT;
5624 } else
5625 goto restart_autoneg;
5626 }
5627 }
5628 } else {
5629 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5630 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5631 }
5632
5633 out:
5634 return current_link_up;
5635 }
5636
5637 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5638 {
5639 bool current_link_up = false;
5640
5641 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5642 goto out;
5643
5644 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5645 u32 txflags, rxflags;
5646 int i;
5647
5648 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5649 u32 local_adv = 0, remote_adv = 0;
5650
5651 if (txflags & ANEG_CFG_PS1)
5652 local_adv |= ADVERTISE_1000XPAUSE;
5653 if (txflags & ANEG_CFG_PS2)
5654 local_adv |= ADVERTISE_1000XPSE_ASYM;
5655
5656 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5657 remote_adv |= LPA_1000XPAUSE;
5658 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5659 remote_adv |= LPA_1000XPAUSE_ASYM;
5660
5661 tp->link_config.rmt_adv =
5662 mii_adv_to_ethtool_adv_x(remote_adv);
5663
5664 tg3_setup_flow_control(tp, local_adv, remote_adv);
5665
5666 current_link_up = true;
5667 }
5668 for (i = 0; i < 30; i++) {
5669 udelay(20);
5670 tw32_f(MAC_STATUS,
5671 (MAC_STATUS_SYNC_CHANGED |
5672 MAC_STATUS_CFG_CHANGED));
5673 udelay(40);
5674 if ((tr32(MAC_STATUS) &
5675 (MAC_STATUS_SYNC_CHANGED |
5676 MAC_STATUS_CFG_CHANGED)) == 0)
5677 break;
5678 }
5679
5680 mac_status = tr32(MAC_STATUS);
5681 if (!current_link_up &&
5682 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5683 !(mac_status & MAC_STATUS_RCVD_CFG))
5684 current_link_up = true;
5685 } else {
5686 tg3_setup_flow_control(tp, 0, 0);
5687
5688 /* Forcing 1000FD link up. */
5689 current_link_up = true;
5690
5691 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5692 udelay(40);
5693
5694 tw32_f(MAC_MODE, tp->mac_mode);
5695 udelay(40);
5696 }
5697
5698 out:
5699 return current_link_up;
5700 }
5701
5702 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5703 {
5704 u32 orig_pause_cfg;
5705 u16 orig_active_speed;
5706 u8 orig_active_duplex;
5707 u32 mac_status;
5708 bool current_link_up;
5709 int i;
5710
5711 orig_pause_cfg = tp->link_config.active_flowctrl;
5712 orig_active_speed = tp->link_config.active_speed;
5713 orig_active_duplex = tp->link_config.active_duplex;
5714
5715 if (!tg3_flag(tp, HW_AUTONEG) &&
5716 tp->link_up &&
5717 tg3_flag(tp, INIT_COMPLETE)) {
5718 mac_status = tr32(MAC_STATUS);
5719 mac_status &= (MAC_STATUS_PCS_SYNCED |
5720 MAC_STATUS_SIGNAL_DET |
5721 MAC_STATUS_CFG_CHANGED |
5722 MAC_STATUS_RCVD_CFG);
5723 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5724 MAC_STATUS_SIGNAL_DET)) {
5725 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5726 MAC_STATUS_CFG_CHANGED));
5727 return 0;
5728 }
5729 }
5730
5731 tw32_f(MAC_TX_AUTO_NEG, 0);
5732
5733 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5734 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5735 tw32_f(MAC_MODE, tp->mac_mode);
5736 udelay(40);
5737
5738 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5739 tg3_init_bcm8002(tp);
5740
5741 /* Enable link change event even when polling the serdes. */
5742 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5743 udelay(40);
5744
5745 current_link_up = false;
5746 tp->link_config.rmt_adv = 0;
5747 mac_status = tr32(MAC_STATUS);
5748
5749 if (tg3_flag(tp, HW_AUTONEG))
5750 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5751 else
5752 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5753
5754 tp->napi[0].hw_status->status =
5755 (SD_STATUS_UPDATED |
5756 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5757
5758 for (i = 0; i < 100; i++) {
5759 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5760 MAC_STATUS_CFG_CHANGED));
5761 udelay(5);
5762 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5763 MAC_STATUS_CFG_CHANGED |
5764 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5765 break;
5766 }
5767
5768 mac_status = tr32(MAC_STATUS);
5769 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5770 current_link_up = false;
5771 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5772 tp->serdes_counter == 0) {
5773 tw32_f(MAC_MODE, (tp->mac_mode |
5774 MAC_MODE_SEND_CONFIGS));
5775 udelay(1);
5776 tw32_f(MAC_MODE, tp->mac_mode);
5777 }
5778 }
5779
5780 if (current_link_up) {
5781 tp->link_config.active_speed = SPEED_1000;
5782 tp->link_config.active_duplex = DUPLEX_FULL;
5783 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 LED_CTRL_LNKLED_OVERRIDE |
5785 LED_CTRL_1000MBPS_ON));
5786 } else {
5787 tp->link_config.active_speed = SPEED_UNKNOWN;
5788 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5789 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790 LED_CTRL_LNKLED_OVERRIDE |
5791 LED_CTRL_TRAFFIC_OVERRIDE));
5792 }
5793
5794 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5795 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5796 if (orig_pause_cfg != now_pause_cfg ||
5797 orig_active_speed != tp->link_config.active_speed ||
5798 orig_active_duplex != tp->link_config.active_duplex)
5799 tg3_link_report(tp);
5800 }
5801
5802 return 0;
5803 }
5804
5805 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5806 {
5807 int err = 0;
5808 u32 bmsr, bmcr;
5809 u16 current_speed = SPEED_UNKNOWN;
5810 u8 current_duplex = DUPLEX_UNKNOWN;
5811 bool current_link_up = false;
5812 u32 local_adv = 0, remote_adv = 0, sgsr; /* zeroed: the SGMII path can reach fiber_setup_done without negotiating pause */
5813
5814 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5815 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5816 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5817 (sgsr & SERDES_TG3_SGMII_MODE)) {
5818
5819 if (force_reset)
5820 tg3_phy_reset(tp);
5821
5822 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5823
5824 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5825 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5826 } else {
5827 current_link_up = true;
5828 if (sgsr & SERDES_TG3_SPEED_1000) {
5829 current_speed = SPEED_1000;
5830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 } else if (sgsr & SERDES_TG3_SPEED_100) {
5832 current_speed = SPEED_100;
5833 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5834 } else {
5835 current_speed = SPEED_10;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 }
5838
5839 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5840 current_duplex = DUPLEX_FULL;
5841 else
5842 current_duplex = DUPLEX_HALF;
5843 }
5844
5845 tw32_f(MAC_MODE, tp->mac_mode);
5846 udelay(40);
5847
5848 tg3_clear_mac_status(tp);
5849
5850 goto fiber_setup_done;
5851 }
5852
5853 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5854 tw32_f(MAC_MODE, tp->mac_mode);
5855 udelay(40);
5856
5857 tg3_clear_mac_status(tp);
5858
5859 if (force_reset)
5860 tg3_phy_reset(tp);
5861
5862 tp->link_config.rmt_adv = 0;
5863
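/* MII_BMSR latches link-down events, so read it twice to obtain the current link state. */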
5864 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5865 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5867 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5868 bmsr |= BMSR_LSTATUS;
5869 else
5870 bmsr &= ~BMSR_LSTATUS;
5871 }
5872
5873 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5874
5875 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5876 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5877 /* do nothing, just check for link up at the end */
5878 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5879 u32 adv, newadv;
5880
5881 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5883 ADVERTISE_1000XPAUSE |
5884 ADVERTISE_1000XPSE_ASYM |
5885 ADVERTISE_SLCT);
5886
5887 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5888 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5889
5890 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5891 tg3_writephy(tp, MII_ADVERTISE, newadv);
5892 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5893 tg3_writephy(tp, MII_BMCR, bmcr);
5894
5895 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5896 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5897 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5898
5899 return err;
5900 }
5901 } else {
5902 u32 new_bmcr;
5903
5904 bmcr &= ~BMCR_SPEED1000;
5905 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5906
5907 if (tp->link_config.duplex == DUPLEX_FULL)
5908 new_bmcr |= BMCR_FULLDPLX;
5909
5910 if (new_bmcr != bmcr) {
5911 /* BMCR_SPEED1000 is a reserved bit that needs
5912 * to be set on write.
5913 */
5914 new_bmcr |= BMCR_SPEED1000;
5915
5916 /* Force a linkdown */
5917 if (tp->link_up) {
5918 u32 adv;
5919
5920 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5921 adv &= ~(ADVERTISE_1000XFULL |
5922 ADVERTISE_1000XHALF |
5923 ADVERTISE_SLCT);
5924 tg3_writephy(tp, MII_ADVERTISE, adv);
5925 tg3_writephy(tp, MII_BMCR, bmcr |
5926 BMCR_ANRESTART |
5927 BMCR_ANENABLE);
5928 udelay(10);
5929 tg3_carrier_off(tp);
5930 }
5931 tg3_writephy(tp, MII_BMCR, new_bmcr);
5932 bmcr = new_bmcr;
5933 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5934 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5936 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5937 bmsr |= BMSR_LSTATUS;
5938 else
5939 bmsr &= ~BMSR_LSTATUS;
5940 }
5941 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5942 }
5943 }
5944
5945 if (bmsr & BMSR_LSTATUS) {
5946 current_speed = SPEED_1000;
5947 current_link_up = true;
5948 if (bmcr & BMCR_FULLDPLX)
5949 current_duplex = DUPLEX_FULL;
5950 else
5951 current_duplex = DUPLEX_HALF;
5952
5953 local_adv = 0;
5954 remote_adv = 0;
5955
5956 if (bmcr & BMCR_ANENABLE) {
5957 u32 common;
5958
5959 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5960 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5961 common = local_adv & remote_adv;
5962 if (common & (ADVERTISE_1000XHALF |
5963 ADVERTISE_1000XFULL)) {
5964 if (common & ADVERTISE_1000XFULL)
5965 current_duplex = DUPLEX_FULL;
5966 else
5967 current_duplex = DUPLEX_HALF;
5968
5969 tp->link_config.rmt_adv =
5970 mii_adv_to_ethtool_adv_x(remote_adv);
5971 } else if (!tg3_flag(tp, 5780_CLASS)) {
5972 /* Link is up via parallel detect */
5973 } else {
5974 current_link_up = false;
5975 }
5976 }
5977 }
5978
5979 fiber_setup_done:
5980 if (current_link_up && current_duplex == DUPLEX_FULL)
5981 tg3_setup_flow_control(tp, local_adv, remote_adv);
5982
5983 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5984 if (tp->link_config.active_duplex == DUPLEX_HALF)
5985 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5986
5987 tw32_f(MAC_MODE, tp->mac_mode);
5988 udelay(40);
5989
5990 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5991
5992 tp->link_config.active_speed = current_speed;
5993 tp->link_config.active_duplex = current_duplex;
5994
5995 tg3_test_and_report_link_chg(tp, current_link_up);
5996 return err;
5997 }
5998
5999 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6000 {
6001 if (tp->serdes_counter) {
6002 /* Give autoneg time to complete. */
6003 tp->serdes_counter--;
6004 return;
6005 }
6006
6007 if (!tp->link_up &&
6008 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6009 u32 bmcr;
6010
6011 tg3_readphy(tp, MII_BMCR, &bmcr);
6012 if (bmcr & BMCR_ANENABLE) {
6013 u32 phy1, phy2;
6014
6015 /* Select shadow register 0x1f */
6016 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6017 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6018
6019 /* Select expansion interrupt status register */
6020 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6021 MII_TG3_DSP_EXP1_INT_STAT);
6022 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024
6025 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6026 /* We have signal detect and are not receiving
6027 * config code words, so the link is up by
6028 * parallel detection.
6029 */
6030
6031 bmcr &= ~BMCR_ANENABLE;
6032 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6033 tg3_writephy(tp, MII_BMCR, bmcr);
6034 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6035 }
6036 }
6037 } else if (tp->link_up &&
6038 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6039 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6040 u32 phy2;
6041
6042 /* Select expansion interrupt status register */
6043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6044 MII_TG3_DSP_EXP1_INT_STAT);
6045 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6046 if (phy2 & 0x20) {
6047 u32 bmcr;
6048
6049 /* Config code words received, turn on autoneg. */
6050 tg3_readphy(tp, MII_BMCR, &bmcr);
6051 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6052
6053 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6054
6055 }
6056 }
6057 }
6058
6059 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6060 {
6061 u32 val;
6062 int err;
6063
6064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6065 err = tg3_setup_fiber_phy(tp, force_reset);
6066 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6067 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6068 else
6069 err = tg3_setup_copper_phy(tp, force_reset);
6070
6071 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6072 u32 scale;
6073
6074 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6075 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6076 scale = 65;
6077 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6078 scale = 6;
6079 else
6080 scale = 12;
6081
6082 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6083 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6084 tw32(GRC_MISC_CFG, val);
6085 }
6086
6087 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6088 (6 << TX_LENGTHS_IPG_SHIFT);
6089 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6090 tg3_asic_rev(tp) == ASIC_REV_5762)
6091 val |= tr32(MAC_TX_LENGTHS) &
6092 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6093 TX_LENGTHS_CNT_DWN_VAL_MSK);
6094
6095 if (tp->link_config.active_speed == SPEED_1000 &&
6096 tp->link_config.active_duplex == DUPLEX_HALF)
6097 tw32(MAC_TX_LENGTHS, val |
6098 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6099 else
6100 tw32(MAC_TX_LENGTHS, val |
6101 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6102
6103 if (!tg3_flag(tp, 5705_PLUS)) {
6104 if (tp->link_up) {
6105 tw32(HOSTCC_STAT_COAL_TICKS,
6106 tp->coal.stats_block_coalesce_usecs);
6107 } else {
6108 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6109 }
6110 }
6111
6112 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6113 val = tr32(PCIE_PWR_MGMT_THRESH);
6114 if (!tp->link_up)
6115 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6116 tp->pwrmgmt_thresh;
6117 else
6118 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6119 tw32(PCIE_PWR_MGMT_THRESH, val);
6120 }
6121
6122 return err;
6123 }
6124
6125 /* tp->lock must be held */
6126 static u64 tg3_refclk_read(struct tg3 *tp)
6127 {
6128 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6129 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6130 }
6131
6132 /* tp->lock must be held */
6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134 {
6135 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136
6137 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 }
6142
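/* Forward declarations: the PTP callbacks below take the driver lock, whose helpers are defined later in this file. */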
6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144 static inline void tg3_full_unlock(struct tg3 *tp);
6145 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6146 {
6147 struct tg3 *tp = netdev_priv(dev);
6148
6149 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6150 SOF_TIMESTAMPING_RX_SOFTWARE |
6151 SOF_TIMESTAMPING_SOFTWARE;
6152
6153 if (tg3_flag(tp, PTP_CAPABLE)) {
6154 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6155 SOF_TIMESTAMPING_RX_HARDWARE |
6156 SOF_TIMESTAMPING_RAW_HARDWARE;
6157 }
6158
6159 if (tp->ptp_clock)
6160 info->phc_index = ptp_clock_index(tp->ptp_clock);
6161 else
6162 info->phc_index = -1;
6163
6164 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6165
6166 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6167 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6168 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6169 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6170 return 0;
6171 }
6172
6173 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6174 {
6175 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176 bool neg_adj = false;
6177 u32 correction = 0;
6178
6179 if (ppb < 0) {
6180 neg_adj = true;
6181 ppb = -ppb;
6182 }
6183
6184 /* Frequency adjustment is performed using hardware with a 24 bit
6185 * accumulator and a programmable correction value. On each clk, the
6186 * correction value gets added to the accumulator and when it
6187 * overflows, the time counter is incremented/decremented.
6188 *
6189 * So conversion from ppb to correction value is
6190 * ppb * (1 << 24) / 1000000000
6191 */
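/* For example, ppb = 1000 yields correction = 1000 * 16777216 / 1000000000 = 16 (truncated). */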
6192 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6193 TG3_EAV_REF_CLK_CORRECT_MASK;
6194
6195 tg3_full_lock(tp, 0);
6196
6197 if (correction)
6198 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 TG3_EAV_REF_CLK_CORRECT_EN |
6200 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6201 else
6202 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6203
6204 tg3_full_unlock(tp);
6205
6206 return 0;
6207 }
6208
6209 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6210 {
6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
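/* The delta is accumulated in software only; it is folded into readings by tg3_ptp_gettime() and tg3_hwclock_to_timestamp(). */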
6213 tg3_full_lock(tp, 0);
6214 tp->ptp_adjust += delta;
6215 tg3_full_unlock(tp);
6216
6217 return 0;
6218 }
6219
6220 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6221 {
6222 u64 ns;
6223 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6224
6225 tg3_full_lock(tp, 0);
6226 ns = tg3_refclk_read(tp);
6227 ns += tp->ptp_adjust;
6228 tg3_full_unlock(tp);
6229
6230 *ts = ns_to_timespec64(ns);
6231
6232 return 0;
6233 }
6234
6235 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6236 const struct timespec64 *ts)
6237 {
6238 u64 ns;
6239 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6240
6241 ns = timespec64_to_ns(ts);
6242
6243 tg3_full_lock(tp, 0);
6244 tg3_refclk_write(tp, ns);
6245 tp->ptp_adjust = 0;
6246 tg3_full_unlock(tp);
6247
6248 return 0;
6249 }
6250
6251 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6252 struct ptp_clock_request *rq, int on)
6253 {
6254 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6255 u32 clock_ctl;
6256 int rval = 0;
6257
6258 switch (rq->type) {
6259 case PTP_CLK_REQ_PEROUT:
6260 if (rq->perout.index != 0)
6261 return -EINVAL;
6262
6263 tg3_full_lock(tp, 0);
6264 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6265 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6266
6267 if (on) {
6268 u64 nsec;
6269
6270 nsec = rq->perout.start.sec * 1000000000ULL +
6271 rq->perout.start.nsec;
6272
6273 if (rq->perout.period.sec || rq->perout.period.nsec) {
6274 netdev_warn(tp->dev,
6275 "Device supports only a one-shot timesync output, period must be 0\n");
6276 rval = -EINVAL;
6277 goto err_out;
6278 }
6279
6280 if (nsec & (1ULL << 63)) {
6281 netdev_warn(tp->dev,
6282 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6283 rval = -EINVAL;
6284 goto err_out;
6285 }
6286
6287 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6288 tw32(TG3_EAV_WATCHDOG0_MSB,
6289 TG3_EAV_WATCHDOG0_EN |
6290 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6291
6292 tw32(TG3_EAV_REF_CLCK_CTL,
6293 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6294 } else {
6295 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6296 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6297 }
6298
6299 err_out:
6300 tg3_full_unlock(tp);
6301 return rval;
6302
6303 default:
6304 break;
6305 }
6306
6307 return -EOPNOTSUPP;
6308 }
6309
6310 static const struct ptp_clock_info tg3_ptp_caps = {
6311 .owner = THIS_MODULE,
6312 .name = "tg3 clock",
6313 .max_adj = 250000000,
6314 .n_alarm = 0,
6315 .n_ext_ts = 0,
6316 .n_per_out = 1,
6317 .n_pins = 0,
6318 .pps = 0,
6319 .adjfreq = tg3_ptp_adjfreq,
6320 .adjtime = tg3_ptp_adjtime,
6321 .gettime64 = tg3_ptp_gettime,
6322 .settime64 = tg3_ptp_settime,
6323 .enable = tg3_ptp_enable,
6324 };
6325
6326 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6327 struct skb_shared_hwtstamps *timestamp)
6328 {
6329 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6330 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6331 tp->ptp_adjust);
6332 }
6333
6334 /* tp->lock must be held */
6335 static void tg3_ptp_init(struct tg3 *tp)
6336 {
6337 if (!tg3_flag(tp, PTP_CAPABLE))
6338 return;
6339
6340 /* Initialize the hardware clock to the system time. */
6341 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6342 tp->ptp_adjust = 0;
6343 tp->ptp_info = tg3_ptp_caps;
6344 }
6345
6346 /* tp->lock must be held */
6347 static void tg3_ptp_resume(struct tg3 *tp)
6348 {
6349 if (!tg3_flag(tp, PTP_CAPABLE))
6350 return;
6351
6352 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6353 tp->ptp_adjust = 0;
6354 }
6355
6356 static void tg3_ptp_fini(struct tg3 *tp)
6357 {
6358 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6359 return;
6360
6361 ptp_clock_unregister(tp->ptp_clock);
6362 tp->ptp_clock = NULL;
6363 tp->ptp_adjust = 0;
6364 }
6365
6366 static inline int tg3_irq_sync(struct tg3 *tp)
6367 {
6368 return tp->irq_sync;
6369 }
6370
6371 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6372 {
6373 int i;
6374
6375 dst = (u32 *)((u8 *)dst + off);
6376 for (i = 0; i < len; i += sizeof(u32))
6377 *dst++ = tr32(off + i);
6378 }
6379
6380 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6381 {
6382 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6383 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6384 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6385 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6386 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6387 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6388 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6389 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6390 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6391 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6392 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6393 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6394 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6395 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6396 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6397 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6398 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6399 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6400 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6401
6402 if (tg3_flag(tp, SUPPORT_MSIX))
6403 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6404
6405 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6406 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6407 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6408 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6409 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6410 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6411 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6412 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6413
6414 if (!tg3_flag(tp, 5705_PLUS)) {
6415 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6416 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6417 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6418 }
6419
6420 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6421 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6422 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6423 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6424 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6425
6426 if (tg3_flag(tp, NVRAM))
6427 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6428 }
6429
6430 static void tg3_dump_state(struct tg3 *tp)
6431 {
6432 int i;
6433 u32 *regs;
6434
6435 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6436 if (!regs)
6437 return;
6438
6439 if (tg3_flag(tp, PCI_EXPRESS)) {
6440 /* Read up to but not including private PCI registers */
6441 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6442 regs[i / sizeof(u32)] = tr32(i);
6443 } else
6444 tg3_dump_legacy_regs(tp, regs);
6445
6446 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6447 if (!regs[i + 0] && !regs[i + 1] &&
6448 !regs[i + 2] && !regs[i + 3])
6449 continue;
6450
6451 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6452 i * 4,
6453 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6454 }
6455
6456 kfree(regs);
6457
6458 for (i = 0; i < tp->irq_cnt; i++) {
6459 struct tg3_napi *tnapi = &tp->napi[i];
6460
6461 /* SW status block */
6462 netdev_err(tp->dev,
6463 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6464 i,
6465 tnapi->hw_status->status,
6466 tnapi->hw_status->status_tag,
6467 tnapi->hw_status->rx_jumbo_consumer,
6468 tnapi->hw_status->rx_consumer,
6469 tnapi->hw_status->rx_mini_consumer,
6470 tnapi->hw_status->idx[0].rx_producer,
6471 tnapi->hw_status->idx[0].tx_consumer);
6472
6473 netdev_err(tp->dev,
6474 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6475 i,
6476 tnapi->last_tag, tnapi->last_irq_tag,
6477 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6478 tnapi->rx_rcb_ptr,
6479 tnapi->prodring.rx_std_prod_idx,
6480 tnapi->prodring.rx_std_cons_idx,
6481 tnapi->prodring.rx_jmb_prod_idx,
6482 tnapi->prodring.rx_jmb_cons_idx);
6483 }
6484 }
6485
6486 /* This is called whenever we suspect that the system chipset is re-
6487 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6488 * is bogus tx completions. We try to recover by setting the
6489 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6490 * in the workqueue.
6491 */
6492 static void tg3_tx_recover(struct tg3 *tp)
6493 {
6494 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6495 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6496
6497 netdev_warn(tp->dev,
6498 "The system may be re-ordering memory-mapped I/O "
6499 "cycles to the network device, attempting to recover. "
6500 "Please report the problem to the driver maintainer "
6501 "and include system chipset information.\n");
6502
6503 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6504 }
6505
6506 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6507 {
6508 /* Tell compiler to fetch tx indices from memory. */
6509 barrier();
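/* Free descriptors = configured pending count minus descriptors in flight; the mask handles producer/consumer wraparound. */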
6510 return tnapi->tx_pending -
6511 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6512 }
6513
6514 /* Tigon3 never reports partial packet sends. So we do not
6515 * need special logic to handle SKBs that have not had all
6516 * of their frags sent yet, like SunGEM does.
6517 */
6518 static void tg3_tx(struct tg3_napi *tnapi)
6519 {
6520 struct tg3 *tp = tnapi->tp;
6521 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6522 u32 sw_idx = tnapi->tx_cons;
6523 struct netdev_queue *txq;
6524 int index = tnapi - tp->napi;
6525 unsigned int pkts_compl = 0, bytes_compl = 0;
6526
6527 if (tg3_flag(tp, ENABLE_TSS))
6528 index--;
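/* With TSS, tx queue 0 is serviced by napi[1], so shift the vector index down by one to find the matching netdev queue. */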
6529
6530 txq = netdev_get_tx_queue(tp->dev, index);
6531
6532 while (sw_idx != hw_idx) {
6533 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6534 struct sk_buff *skb = ri->skb;
6535 int i, tx_bug = 0;
6536
6537 if (unlikely(skb == NULL)) {
6538 tg3_tx_recover(tp);
6539 return;
6540 }
6541
6542 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6543 struct skb_shared_hwtstamps timestamp;
6544 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6545 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6546
6547 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6548
6549 skb_tstamp_tx(skb, &timestamp);
6550 }
6551
6552 pci_unmap_single(tp->pdev,
6553 dma_unmap_addr(ri, mapping),
6554 skb_headlen(skb),
6555 PCI_DMA_TODEVICE);
6556
6557 ri->skb = NULL;
6558
6559 while (ri->fragmented) {
6560 ri->fragmented = false;
6561 sw_idx = NEXT_TX(sw_idx);
6562 ri = &tnapi->tx_buffers[sw_idx];
6563 }
6564
6565 sw_idx = NEXT_TX(sw_idx);
6566
6567 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6568 ri = &tnapi->tx_buffers[sw_idx];
6569 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6570 tx_bug = 1;
6571
6572 pci_unmap_page(tp->pdev,
6573 dma_unmap_addr(ri, mapping),
6574 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6575 PCI_DMA_TODEVICE);
6576
6577 while (ri->fragmented) {
6578 ri->fragmented = false;
6579 sw_idx = NEXT_TX(sw_idx);
6580 ri = &tnapi->tx_buffers[sw_idx];
6581 }
6582
6583 sw_idx = NEXT_TX(sw_idx);
6584 }
6585
6586 pkts_compl++;
6587 bytes_compl += skb->len;
6588
6589 dev_kfree_skb_any(skb);
6590
6591 if (unlikely(tx_bug)) {
6592 tg3_tx_recover(tp);
6593 return;
6594 }
6595 }
6596
6597 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6598
6599 tnapi->tx_cons = sw_idx;
6600
6601 /* Need to make the tx_cons update visible to tg3_start_xmit()
6602 * before checking for netif_queue_stopped(). Without the
6603 * memory barrier, there is a small possibility that tg3_start_xmit()
6604 * will miss it and cause the queue to be stopped forever.
6605 */
6606 smp_mb();
6607
6608 if (unlikely(netif_tx_queue_stopped(txq) &&
6609 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6610 __netif_tx_lock(txq, smp_processor_id());
6611 if (netif_tx_queue_stopped(txq) &&
6612 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6613 netif_tx_wake_queue(txq);
6614 __netif_tx_unlock(txq);
6615 }
6616 }
6617
6618 static void tg3_frag_free(bool is_frag, void *data)
6619 {
6620 if (is_frag)
6621 skb_free_frag(data);
6622 else
6623 kfree(data);
6624 }
6625
6626 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6627 {
6628 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6629 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6630
6631 if (!ri->data)
6632 return;
6633
6634 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6635 map_sz, PCI_DMA_FROMDEVICE);
6636 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6637 ri->data = NULL;
6638 }
6639
6640
6641 /* Returns size of skb allocated or < 0 on error.
6642 *
6643 * We only need to fill in the address because the other members
6644 * of the RX descriptor are invariant, see tg3_init_rings.
6645 *
6646 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6647 * posting buffers we only dirty the first cache line of the RX
6648 * descriptor (containing the address). Whereas for the RX status
6649 * buffers the cpu only reads the last cacheline of the RX descriptor
6650 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6651 */
6652 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6653 u32 opaque_key, u32 dest_idx_unmasked,
6654 unsigned int *frag_size)
6655 {
6656 struct tg3_rx_buffer_desc *desc;
6657 struct ring_info *map;
6658 u8 *data;
6659 dma_addr_t mapping;
6660 int skb_size, data_size, dest_idx;
6661
6662 switch (opaque_key) {
6663 case RXD_OPAQUE_RING_STD:
6664 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6665 desc = &tpr->rx_std[dest_idx];
6666 map = &tpr->rx_std_buffers[dest_idx];
6667 data_size = tp->rx_pkt_map_sz;
6668 break;
6669
6670 case RXD_OPAQUE_RING_JUMBO:
6671 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6672 desc = &tpr->rx_jmb[dest_idx].std;
6673 map = &tpr->rx_jmb_buffers[dest_idx];
6674 data_size = TG3_RX_JMB_MAP_SZ;
6675 break;
6676
6677 default:
6678 return -EINVAL;
6679 }
6680
6681 /* Do not overwrite any of the map or rp information
6682 * until we are sure we can commit to a new buffer.
6683 *
6684 * Callers depend upon this behavior and assume that
6685 * we leave everything unchanged if we fail.
6686 */
6687 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6688 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
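/* The allocation covers the DMA area plus headroom and the skb_shared_info tail, so build_skb() can wrap this buffer later in tg3_rx(). */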
6689 if (skb_size <= PAGE_SIZE) {
6690 data = netdev_alloc_frag(skb_size);
6691 *frag_size = skb_size;
6692 } else {
6693 data = kmalloc(skb_size, GFP_ATOMIC);
6694 *frag_size = 0;
6695 }
6696 if (!data)
6697 return -ENOMEM;
6698
6699 mapping = pci_map_single(tp->pdev,
6700 data + TG3_RX_OFFSET(tp),
6701 data_size,
6702 PCI_DMA_FROMDEVICE);
6703 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6704 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6705 return -EIO;
6706 }
6707
6708 map->data = data;
6709 dma_unmap_addr_set(map, mapping, mapping);
6710
6711 desc->addr_hi = ((u64)mapping >> 32);
6712 desc->addr_lo = ((u64)mapping & 0xffffffff);
6713
6714 return data_size;
6715 }
6716
6717 /* We only need to move over the address because the other
6718 * members of the RX descriptor are invariant. See notes above
6719 * tg3_alloc_rx_data for full details.
6720 */
6721 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6722 struct tg3_rx_prodring_set *dpr,
6723 u32 opaque_key, int src_idx,
6724 u32 dest_idx_unmasked)
6725 {
6726 struct tg3 *tp = tnapi->tp;
6727 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6728 struct ring_info *src_map, *dest_map;
6729 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6730 int dest_idx;
6731
6732 switch (opaque_key) {
6733 case RXD_OPAQUE_RING_STD:
6734 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6735 dest_desc = &dpr->rx_std[dest_idx];
6736 dest_map = &dpr->rx_std_buffers[dest_idx];
6737 src_desc = &spr->rx_std[src_idx];
6738 src_map = &spr->rx_std_buffers[src_idx];
6739 break;
6740
6741 case RXD_OPAQUE_RING_JUMBO:
6742 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6743 dest_desc = &dpr->rx_jmb[dest_idx].std;
6744 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6745 src_desc = &spr->rx_jmb[src_idx].std;
6746 src_map = &spr->rx_jmb_buffers[src_idx];
6747 break;
6748
6749 default:
6750 return;
6751 }
6752
6753 dest_map->data = src_map->data;
6754 dma_unmap_addr_set(dest_map, mapping,
6755 dma_unmap_addr(src_map, mapping));
6756 dest_desc->addr_hi = src_desc->addr_hi;
6757 dest_desc->addr_lo = src_desc->addr_lo;
6758
6759 /* Ensure that the update to the skb happens after the physical
6760 * addresses have been transferred to the new BD location.
6761 */
6762 smp_wmb();
6763
6764 src_map->data = NULL;
6765 }
6766
6767 /* The RX ring scheme is composed of multiple rings which post fresh
6768 * buffers to the chip, and one special ring the chip uses to report
6769 * status back to the host.
6770 *
6771 * The special ring reports the status of received packets to the
6772 * host. The chip does not write into the original descriptor the
6773 * RX buffer was obtained from. The chip simply takes the original
6774 * descriptor as provided by the host, updates the status and length
6775 * field, then writes this into the next status ring entry.
6776 *
6777 * Each ring the host uses to post buffers to the chip is described
6778 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6779 * it is first placed into the on-chip ram. When the packet's length
6780 * is known, the chip walks down the TG3_BDINFO entries to select the
6781 * ring. Each TG3_BDINFO specifies a MAXLEN field and the first
6782 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6783 *
6784 * The "separate ring for rx status" scheme may sound queer, but it makes
6785 * sense from a cache coherency perspective. If only the host writes
6786 * to the buffer post rings, and only the chip writes to the rx status
6787 * rings, then cache lines never move beyond shared-modified state.
6788 * If both the host and chip were to write into the same ring, cache line
6789 * eviction could occur since both entities want it in an exclusive state.
6790 */
6791 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6792 {
6793 struct tg3 *tp = tnapi->tp;
6794 u32 work_mask, rx_std_posted = 0;
6795 u32 std_prod_idx, jmb_prod_idx;
6796 u32 sw_idx = tnapi->rx_rcb_ptr;
6797 u16 hw_idx;
6798 int received;
6799 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6800
6801 hw_idx = *(tnapi->rx_rcb_prod_idx);
6802 /*
6803 * We need to order the read of hw_idx and the read of
6804 * the opaque cookie.
6805 */
6806 rmb();
6807 work_mask = 0;
6808 received = 0;
6809 std_prod_idx = tpr->rx_std_prod_idx;
6810 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6811 while (sw_idx != hw_idx && budget > 0) {
6812 struct ring_info *ri;
6813 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6814 unsigned int len;
6815 struct sk_buff *skb;
6816 dma_addr_t dma_addr;
6817 u32 opaque_key, desc_idx, *post_ptr;
6818 u8 *data;
6819 u64 tstamp = 0;
6820
6821 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6822 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6823 if (opaque_key == RXD_OPAQUE_RING_STD) {
6824 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6825 dma_addr = dma_unmap_addr(ri, mapping);
6826 data = ri->data;
6827 post_ptr = &std_prod_idx;
6828 rx_std_posted++;
6829 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6830 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6831 dma_addr = dma_unmap_addr(ri, mapping);
6832 data = ri->data;
6833 post_ptr = &jmb_prod_idx;
6834 } else
6835 goto next_pkt_nopost;
6836
6837 work_mask |= opaque_key;
6838
6839 if (desc->err_vlan & RXD_ERR_MASK) {
6840 drop_it:
6841 tg3_recycle_rx(tnapi, tpr, opaque_key,
6842 desc_idx, *post_ptr);
6843 drop_it_no_recycle:
6844 /* Other statistics kept track of by card. */
6845 tp->rx_dropped++;
6846 goto next_pkt;
6847 }
6848
6849 prefetch(data + TG3_RX_OFFSET(tp));
6850 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6851 ETH_FCS_LEN;
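/* The hardware-reported length includes the 4-byte FCS; strip it. */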
6852
6853 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854 RXD_FLAG_PTPSTAT_PTPV1 ||
6855 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856 RXD_FLAG_PTPSTAT_PTPV2) {
6857 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6858 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6859 }
6860
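/* Copy-break: large packets hand the DMA buffer straight to the stack and the ring slot is refilled; small packets are copied so the existing buffer can be recycled. */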
6861 if (len > TG3_RX_COPY_THRESH(tp)) {
6862 int skb_size;
6863 unsigned int frag_size;
6864
6865 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6866 *post_ptr, &frag_size);
6867 if (skb_size < 0)
6868 goto drop_it;
6869
6870 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6871 PCI_DMA_FROMDEVICE);
6872
6873 /* Ensure that the update to the data happens
6874 * after the usage of the old DMA mapping.
6875 */
6876 smp_wmb();
6877
6878 ri->data = NULL;
6879
6880 skb = build_skb(data, frag_size);
6881 if (!skb) {
6882 tg3_frag_free(frag_size != 0, data);
6883 goto drop_it_no_recycle;
6884 }
6885 skb_reserve(skb, TG3_RX_OFFSET(tp));
6886 } else {
6887 tg3_recycle_rx(tnapi, tpr, opaque_key,
6888 desc_idx, *post_ptr);
6889
6890 skb = netdev_alloc_skb(tp->dev,
6891 len + TG3_RAW_IP_ALIGN);
6892 if (skb == NULL)
6893 goto drop_it_no_recycle;
6894
6895 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6896 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6897 memcpy(skb->data,
6898 data + TG3_RX_OFFSET(tp),
6899 len);
6900 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6901 }
6902
6903 skb_put(skb, len);
6904 if (tstamp)
6905 tg3_hwclock_to_timestamp(tp, tstamp,
6906 skb_hwtstamps(skb));
6907
6908 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6909 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6910 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6911 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6912 skb->ip_summed = CHECKSUM_UNNECESSARY;
6913 else
6914 skb_checksum_none_assert(skb);
6915
6916 skb->protocol = eth_type_trans(skb, tp->dev);
6917
6918 if (len > (tp->dev->mtu + ETH_HLEN) &&
6919 skb->protocol != htons(ETH_P_8021Q) &&
6920 skb->protocol != htons(ETH_P_8021AD)) {
6921 dev_kfree_skb_any(skb);
6922 goto drop_it_no_recycle;
6923 }
6924
6925 if (desc->type_flags & RXD_FLAG_VLAN &&
6926 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6927 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6928 desc->err_vlan & RXD_VLAN_MASK);
6929
6930 napi_gro_receive(&tnapi->napi, skb);
6931
6932 received++;
6933 budget--;
6934
6935 next_pkt:
6936 (*post_ptr)++;
6937
6938 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6939 tpr->rx_std_prod_idx = std_prod_idx &
6940 tp->rx_std_ring_mask;
6941 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6942 tpr->rx_std_prod_idx);
6943 work_mask &= ~RXD_OPAQUE_RING_STD;
6944 rx_std_posted = 0;
6945 }
6946 next_pkt_nopost:
6947 sw_idx++;
6948 sw_idx &= tp->rx_ret_ring_mask;
6949
6950 /* Refresh hw_idx to see if there is new work */
6951 if (sw_idx == hw_idx) {
6952 hw_idx = *(tnapi->rx_rcb_prod_idx);
6953 rmb();
6954 }
6955 }
6956
6957 /* ACK the status ring. */
6958 tnapi->rx_rcb_ptr = sw_idx;
6959 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6960
6961 /* Refill RX ring(s). */
6962 if (!tg3_flag(tp, ENABLE_RSS)) {
6963 /* Sync BD data before updating mailbox */
6964 wmb();
6965
6966 if (work_mask & RXD_OPAQUE_RING_STD) {
6967 tpr->rx_std_prod_idx = std_prod_idx &
6968 tp->rx_std_ring_mask;
6969 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6970 tpr->rx_std_prod_idx);
6971 }
6972 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6973 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6974 tp->rx_jmb_ring_mask;
6975 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6976 tpr->rx_jmb_prod_idx);
6977 }
6978 mmiowb();
6979 } else if (work_mask) {
6980 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6981 * updated before the producer indices can be updated.
6982 */
6983 smp_wmb();
6984
6985 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6986 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6987
6988 if (tnapi != &tp->napi[1]) {
6989 tp->rx_refill = true;
6990 napi_schedule(&tp->napi[1].napi);
6991 }
6992 }
6993
6994 return received;
6995 }
6996
6997 static void tg3_poll_link(struct tg3 *tp)
6998 {
6999 /* handle link change and other phy events */
7000 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7001 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7002
7003 if (sblk->status & SD_STATUS_LINK_CHG) {
7004 sblk->status = SD_STATUS_UPDATED |
7005 (sblk->status & ~SD_STATUS_LINK_CHG);
7006 spin_lock(&tp->lock);
7007 if (tg3_flag(tp, USE_PHYLIB)) {
7008 tw32_f(MAC_STATUS,
7009 (MAC_STATUS_SYNC_CHANGED |
7010 MAC_STATUS_CFG_CHANGED |
7011 MAC_STATUS_MI_COMPLETION |
7012 MAC_STATUS_LNKSTATE_CHANGED));
7013 udelay(40);
7014 } else
7015 tg3_setup_phy(tp, false);
7016 spin_unlock(&tp->lock);
7017 }
7018 }
7019 }
7020
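/* Move freshly refilled rx buffers from a per-queue producer ring (spr) into the hardware-visible ring (dpr). Used in RSS mode, where napi[1] replenishes on behalf of all queues. */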
7021 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7022 struct tg3_rx_prodring_set *dpr,
7023 struct tg3_rx_prodring_set *spr)
7024 {
7025 u32 si, di, cpycnt, src_prod_idx;
7026 int i, err = 0;
7027
7028 while (1) {
7029 src_prod_idx = spr->rx_std_prod_idx;
7030
7031 /* Make sure updates to the rx_std_buffers[] entries and the
7032 * standard producer index are seen in the correct order.
7033 */
7034 smp_rmb();
7035
7036 if (spr->rx_std_cons_idx == src_prod_idx)
7037 break;
7038
7039 if (spr->rx_std_cons_idx < src_prod_idx)
7040 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7041 else
7042 cpycnt = tp->rx_std_ring_mask + 1 -
7043 spr->rx_std_cons_idx;
7044
7045 cpycnt = min(cpycnt,
7046 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7047
7048 si = spr->rx_std_cons_idx;
7049 di = dpr->rx_std_prod_idx;
7050
7051 for (i = di; i < di + cpycnt; i++) {
7052 if (dpr->rx_std_buffers[i].data) {
7053 cpycnt = i - di;
7054 err = -ENOSPC;
7055 break;
7056 }
7057 }
7058
7059 if (!cpycnt)
7060 break;
7061
7062 /* Ensure that updates to the rx_std_buffers ring and the
7063 * shadowed hardware producer ring from tg3_recycle_skb() are
7064 * ordered correctly WRT the skb check above.
7065 */
7066 smp_rmb();
7067
7068 memcpy(&dpr->rx_std_buffers[di],
7069 &spr->rx_std_buffers[si],
7070 cpycnt * sizeof(struct ring_info));
7071
7072 for (i = 0; i < cpycnt; i++, di++, si++) {
7073 struct tg3_rx_buffer_desc *sbd, *dbd;
7074 sbd = &spr->rx_std[si];
7075 dbd = &dpr->rx_std[di];
7076 dbd->addr_hi = sbd->addr_hi;
7077 dbd->addr_lo = sbd->addr_lo;
7078 }
7079
7080 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7081 tp->rx_std_ring_mask;
7082 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7083 tp->rx_std_ring_mask;
7084 }
7085
7086 while (1) {
7087 src_prod_idx = spr->rx_jmb_prod_idx;
7088
7089 /* Make sure updates to the rx_jmb_buffers[] entries and
7090 * the jumbo producer index are seen in the correct order.
7091 */
7092 smp_rmb();
7093
7094 if (spr->rx_jmb_cons_idx == src_prod_idx)
7095 break;
7096
7097 if (spr->rx_jmb_cons_idx < src_prod_idx)
7098 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7099 else
7100 cpycnt = tp->rx_jmb_ring_mask + 1 -
7101 spr->rx_jmb_cons_idx;
7102
7103 cpycnt = min(cpycnt,
7104 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7105
7106 si = spr->rx_jmb_cons_idx;
7107 di = dpr->rx_jmb_prod_idx;
7108
7109 for (i = di; i < di + cpycnt; i++) {
7110 if (dpr->rx_jmb_buffers[i].data) {
7111 cpycnt = i - di;
7112 err = -ENOSPC;
7113 break;
7114 }
7115 }
7116
7117 if (!cpycnt)
7118 break;
7119
7120 /* Ensure that updates to the rx_jmb_buffers ring and the
7121 * shadowed hardware producer ring from tg3_recycle_skb() are
7122 * ordered correctly WRT the skb check above.
7123 */
7124 smp_rmb();
7125
7126 memcpy(&dpr->rx_jmb_buffers[di],
7127 &spr->rx_jmb_buffers[si],
7128 cpycnt * sizeof(struct ring_info));
7129
7130 for (i = 0; i < cpycnt; i++, di++, si++) {
7131 struct tg3_rx_buffer_desc *sbd, *dbd;
7132 sbd = &spr->rx_jmb[si].std;
7133 dbd = &dpr->rx_jmb[di].std;
7134 dbd->addr_hi = sbd->addr_hi;
7135 dbd->addr_lo = sbd->addr_lo;
7136 }
7137
7138 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7139 tp->rx_jmb_ring_mask;
7140 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7141 tp->rx_jmb_ring_mask;
7142 }
7143
7144 return err;
7145 }
7146
7147 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7148 {
7149 struct tg3 *tp = tnapi->tp;
7150
7151 /* run TX completion thread */
7152 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7153 tg3_tx(tnapi);
7154 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7155 return work_done;
7156 }
7157
7158 if (!tnapi->rx_rcb_prod_idx)
7159 return work_done;
7160
7161 /* run RX thread, within the bounds set by NAPI.
7162 * All RX "locking" is done by ensuring outside
7163 * code synchronizes with tg3->napi.poll()
7164 */
7165 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7166 work_done += tg3_rx(tnapi, budget - work_done);
7167
7168 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7169 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7170 int i, err = 0;
7171 u32 std_prod_idx = dpr->rx_std_prod_idx;
7172 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7173
7174 tp->rx_refill = false;
7175 for (i = 1; i <= tp->rxq_cnt; i++)
7176 err |= tg3_rx_prodring_xfer(tp, dpr,
7177 &tp->napi[i].prodring);
7178
7179 wmb();
7180
7181 if (std_prod_idx != dpr->rx_std_prod_idx)
7182 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7183 dpr->rx_std_prod_idx);
7184
7185 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7186 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7187 dpr->rx_jmb_prod_idx);
7188
7189 mmiowb();
7190
7191 if (err)
7192 tw32_f(HOSTCC_MODE, tp->coal_now);
7193 }
7194
7195 return work_done;
7196 }
7197
7198 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7199 {
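/* test_and_set_bit() guarantees the reset task is queued at most once. */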
7200 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7201 schedule_work(&tp->reset_task);
7202 }
7203
7204 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7205 {
7206 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7207 cancel_work_sync(&tp->reset_task);
7208 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7209 }
7210
7211 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7212 {
7213 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7214 struct tg3 *tp = tnapi->tp;
7215 int work_done = 0;
7216 struct tg3_hw_status *sblk = tnapi->hw_status;
7217
7218 while (1) {
7219 work_done = tg3_poll_work(tnapi, work_done, budget);
7220
7221 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7222 goto tx_recovery;
7223
7224 if (unlikely(work_done >= budget))
7225 break;
7226
7227 /* tp->last_tag is used in tg3_int_reenable() below
7228 * to tell the hw how much work has been processed,
7229 * so we must read it before checking for more work.
7230 */
7231 tnapi->last_tag = sblk->status_tag;
7232 tnapi->last_irq_tag = tnapi->last_tag;
7233 rmb();
7234
7235 /* check for RX/TX work to do */
7236 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7237 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7238
7239 /* This test is not race-free, but looping
7240 * again reduces the number of interrupts.
7241 */
7242 if (tnapi == &tp->napi[1] && tp->rx_refill)
7243 continue;
7244
7245 napi_complete_done(napi, work_done);
7246 /* Reenable interrupts. */
7247 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7248
7249 /* This test here is synchronized by napi_schedule()
7250 * and napi_complete() to close the race condition.
7251 */
7252 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7253 tw32(HOSTCC_MODE, tp->coalesce_mode |
7254 HOSTCC_MODE_ENABLE |
7255 tnapi->coal_now);
7256 }
7257 mmiowb();
7258 break;
7259 }
7260 }
7261
7262 return work_done;
7263
7264 tx_recovery:
7265 /* work_done is guaranteed to be less than budget. */
7266 napi_complete(napi);
7267 tg3_reset_task_schedule(tp);
7268 return work_done;
7269 }
7270
7271 static void tg3_process_error(struct tg3 *tp)
7272 {
7273 u32 val;
7274 bool real_error = false;
7275
7276 if (tg3_flag(tp, ERROR_PROCESSED))
7277 return;
7278
7279 /* Check Flow Attention register */
7280 val = tr32(HOSTCC_FLOW_ATTN);
7281 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7282 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7283 real_error = true;
7284 }
7285
7286 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7287 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7288 real_error = true;
7289 }
7290
7291 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7292 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7293 real_error = true;
7294 }
7295
7296 if (!real_error)
7297 return;
7298
7299 tg3_dump_state(tp);
7300
7301 tg3_flag_set(tp, ERROR_PROCESSED);
7302 tg3_reset_task_schedule(tp);
7303 }
7304
7305 static int tg3_poll(struct napi_struct *napi, int budget)
7306 {
7307 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7308 struct tg3 *tp = tnapi->tp;
7309 int work_done = 0;
7310 struct tg3_hw_status *sblk = tnapi->hw_status;
7311
7312 while (1) {
7313 if (sblk->status & SD_STATUS_ERROR)
7314 tg3_process_error(tp);
7315
7316 tg3_poll_link(tp);
7317
7318 work_done = tg3_poll_work(tnapi, work_done, budget);
7319
7320 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7321 goto tx_recovery;
7322
7323 if (unlikely(work_done >= budget))
7324 break;
7325
7326 if (tg3_flag(tp, TAGGED_STATUS)) {
7327 /* tp->last_tag is used in tg3_int_reenable() below
7328 * to tell the hw how much work has been processed,
7329 * so we must read it before checking for more work.
7330 */
7331 tnapi->last_tag = sblk->status_tag;
7332 tnapi->last_irq_tag = tnapi->last_tag;
7333 rmb();
7334 } else
7335 sblk->status &= ~SD_STATUS_UPDATED;
7336
7337 if (likely(!tg3_has_work(tnapi))) {
7338 napi_complete_done(napi, work_done);
7339 tg3_int_reenable(tnapi);
7340 break;
7341 }
7342 }
7343
7344 return work_done;
7345
7346 tx_recovery:
7347 /* work_done is guaranteed to be less than budget. */
7348 napi_complete(napi);
7349 tg3_reset_task_schedule(tp);
7350 return work_done;
7351 }
7352
7353 static void tg3_napi_disable(struct tg3 *tp)
7354 {
7355 int i;
7356
7357 for (i = tp->irq_cnt - 1; i >= 0; i--)
7358 napi_disable(&tp->napi[i].napi);
7359 }
7360
7361 static void tg3_napi_enable(struct tg3 *tp)
7362 {
7363 int i;
7364
7365 for (i = 0; i < tp->irq_cnt; i++)
7366 napi_enable(&tp->napi[i].napi);
7367 }
7368
7369 static void tg3_napi_init(struct tg3 *tp)
7370 {
7371 int i;
7372
7373 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7374 for (i = 1; i < tp->irq_cnt; i++)
7375 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7376 }
7377
7378 static void tg3_napi_fini(struct tg3 *tp)
7379 {
7380 int i;
7381
7382 for (i = 0; i < tp->irq_cnt; i++)
7383 netif_napi_del(&tp->napi[i].napi);
7384 }
7385
7386 static inline void tg3_netif_stop(struct tg3 *tp)
7387 {
7388 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7389 tg3_napi_disable(tp);
7390 netif_carrier_off(tp->dev);
7391 netif_tx_disable(tp->dev);
7392 }
7393
7394 /* tp->lock must be held */
7395 static inline void tg3_netif_start(struct tg3 *tp)
7396 {
7397 tg3_ptp_resume(tp);
7398
7399 /* NOTE: unconditional netif_tx_wake_all_queues is only
7400 * appropriate so long as all callers are assured to
7401 * have free tx slots (such as after tg3_init_hw)
7402 */
7403 netif_tx_wake_all_queues(tp->dev);
7404
7405 if (tp->link_up)
7406 netif_carrier_on(tp->dev);
7407
7408 tg3_napi_enable(tp);
7409 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7410 tg3_enable_ints(tp);
7411 }
7412
7413 static void tg3_irq_quiesce(struct tg3 *tp)
7414 __releases(tp->lock)
7415 __acquires(tp->lock)
7416 {
7417 int i;
7418
7419 BUG_ON(tp->irq_sync);
7420
7421 tp->irq_sync = 1;
7422 smp_mb();
7423
7424 spin_unlock_bh(&tp->lock);
7425
7426 for (i = 0; i < tp->irq_cnt; i++)
7427 synchronize_irq(tp->napi[i].irq_vec);
7428
7429 spin_lock_bh(&tp->lock);
7430 }
7431
7432 /* Fully shut down all tg3 driver activity elsewhere in the system.
7433 * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7434 * well. Most of the time this is only necessary when shutting down
7435 * the device.
7436 */
7437 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7438 {
7439 spin_lock_bh(&tp->lock);
7440 if (irq_sync)
7441 tg3_irq_quiesce(tp);
7442 }
7443
7444 static inline void tg3_full_unlock(struct tg3 *tp)
7445 {
7446 spin_unlock_bh(&tp->lock);
7447 }
7448
7449 /* One-shot MSI handler - the chip automatically disables the interrupt
7450 * after sending the MSI, so the driver doesn't have to do it.
7451 */
7452 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7453 {
7454 struct tg3_napi *tnapi = dev_id;
7455 struct tg3 *tp = tnapi->tp;
7456
7457 prefetch(tnapi->hw_status);
7458 if (tnapi->rx_rcb)
7459 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7460
7461 if (likely(!tg3_irq_sync(tp)))
7462 napi_schedule(&tnapi->napi);
7463
7464 return IRQ_HANDLED;
7465 }
7466
7467 /* MSI ISR - No need to check for interrupt sharing and no need to
7468 * flush status block and interrupt mailbox. PCI ordering rules
7469 * guarantee that MSI will arrive after the status block.
7470 */
7471 static irqreturn_t tg3_msi(int irq, void *dev_id)
7472 {
7473 struct tg3_napi *tnapi = dev_id;
7474 struct tg3 *tp = tnapi->tp;
7475
7476 prefetch(tnapi->hw_status);
7477 if (tnapi->rx_rcb)
7478 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479 /*
7480 * Writing any value to intr-mbox-0 clears PCI INTA# and
7481 * chip-internal interrupt pending events.
7482 * Writing non-zero to intr-mbox-0 additionally tells the
7483 * NIC to stop sending us irqs, engaging "in-intr-handler"
7484 * event coalescing.
7485 */
7486 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7487 if (likely(!tg3_irq_sync(tp)))
7488 napi_schedule(&tnapi->napi);
7489
7490 return IRQ_RETVAL(1);
7491 }
7492
7493 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7494 {
7495 struct tg3_napi *tnapi = dev_id;
7496 struct tg3 *tp = tnapi->tp;
7497 struct tg3_hw_status *sblk = tnapi->hw_status;
7498 unsigned int handled = 1;
7499
7500 /* In INTx mode, it is possible for the interrupt to arrive at
7501 * the CPU before the status block write that was posted ahead of it.
7502 * Reading the PCI State register will confirm whether the
7503 * interrupt is ours and will flush the status block.
7504 */
7505 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7506 if (tg3_flag(tp, CHIP_RESETTING) ||
7507 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7508 handled = 0;
7509 goto out;
7510 }
7511 }
7512
7513 /*
7514 * Writing any value to intr-mbox-0 clears PCI INTA# and
7515 * chip-internal interrupt pending events.
7516 * Writing non-zero to intr-mbox-0 additionally tells the
7517 * NIC to stop sending us irqs, engaging "in-intr-handler"
7518 * event coalescing.
7519 *
7520 * Flush the mailbox to de-assert the IRQ immediately to prevent
7521 * spurious interrupts. The flush impacts performance but
7522 * excessive spurious interrupts can be worse in some cases.
7523 */
7524 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7525 if (tg3_irq_sync(tp))
7526 goto out;
7527 sblk->status &= ~SD_STATUS_UPDATED;
7528 if (likely(tg3_has_work(tnapi))) {
7529 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7530 napi_schedule(&tnapi->napi);
7531 } else {
7532 /* No work, shared interrupt perhaps? re-enable
7533 * interrupts, and flush that PCI write
7534 */
7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7536 0x00000000);
7537 }
7538 out:
7539 return IRQ_RETVAL(handled);
7540 }
7541
7542 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7543 {
7544 struct tg3_napi *tnapi = dev_id;
7545 struct tg3 *tp = tnapi->tp;
7546 struct tg3_hw_status *sblk = tnapi->hw_status;
7547 unsigned int handled = 1;
7548
7549 /* In INTx mode, it is possible for the interrupt to arrive at
7550 * the CPU before the status block write that was posted ahead of it.
7551 * Reading the PCI State register will confirm whether the
7552 * interrupt is ours and will flush the status block.
7553 */
7554 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7555 if (tg3_flag(tp, CHIP_RESETTING) ||
7556 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7557 handled = 0;
7558 goto out;
7559 }
7560 }
7561
7562 /*
7563 * Writing any value to intr-mbox-0 clears PCI INTA# and
7564 * chip-internal interrupt pending events.
7565 * Writing non-zero to intr-mbox-0 additionally tells the
7566 * NIC to stop sending us irqs, engaging "in-intr-handler"
7567 * event coalescing.
7568 *
7569 * Flush the mailbox to de-assert the IRQ immediately to prevent
7570 * spurious interrupts. The flush impacts performance but
7571 * excessive spurious interrupts can be worse in some cases.
7572 */
7573 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574
7575 /*
7576 * In a shared interrupt configuration, sometimes other devices'
7577 * interrupts will scream. We record the current status tag here
7578 * so that the above check can report that the screaming interrupts
7579 * are unhandled. Eventually they will be silenced.
7580 */
7581 tnapi->last_irq_tag = sblk->status_tag;
7582
7583 if (tg3_irq_sync(tp))
7584 goto out;
7585
7586 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7587
7588 napi_schedule(&tnapi->napi);
7589
7590 out:
7591 return IRQ_RETVAL(handled);
7592 }
7593
7594 /* ISR for interrupt test */
7595 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7596 {
7597 struct tg3_napi *tnapi = dev_id;
7598 struct tg3 *tp = tnapi->tp;
7599 struct tg3_hw_status *sblk = tnapi->hw_status;
7600
7601 if ((sblk->status & SD_STATUS_UPDATED) ||
7602 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7603 tg3_disable_ints(tp);
7604 return IRQ_RETVAL(1);
7605 }
7606 return IRQ_RETVAL(0);
7607 }
7608
7609 #ifdef CONFIG_NET_POLL_CONTROLLER
7610 static void tg3_poll_controller(struct net_device *dev)
7611 {
7612 int i;
7613 struct tg3 *tp = netdev_priv(dev);
7614
7615 if (tg3_irq_sync(tp))
7616 return;
7617
7618 for (i = 0; i < tp->irq_cnt; i++)
7619 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7620 }
7621 #endif
7622
7623 static void tg3_tx_timeout(struct net_device *dev)
7624 {
7625 struct tg3 *tp = netdev_priv(dev);
7626
7627 if (netif_msg_tx_err(tp)) {
7628 netdev_err(dev, "transmit timed out, resetting\n");
7629 tg3_dump_state(tp);
7630 }
7631
7632 tg3_reset_task_schedule(tp);
7633 }
7634
7635 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7636 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7637 {
7638 u32 base = (u32) mapping & 0xffffffff;
7639
7640 return base + len + 8 < base;
7641 }
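/* Worked example of the test above: mapping = 0xfffffff0 and
 * len = 0x20 give base + len + 8 = 0x100000018, which truncates to
 * 0x18 in u32 arithmetic; that is less than base, so the buffer
 * (plus an 8-byte guard) wraps a 4GB boundary and must take the
 * workaround path.
 */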
7642
7643 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7644 * of any 4GB boundaries: 4G, 8G, etc
7645 */
7646 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647 u32 len, u32 mss)
7648 {
7649 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7650 u32 base = (u32) mapping & 0xffffffff;
7651
7652 return ((base + len + (mss & 0x3fff)) < base);
7653 }
7654 return 0;
7655 }
7656
7657 /* Test for DMA addresses > 40-bit */
7658 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 int len)
7660 {
7661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7662 if (tg3_flag(tp, 40BIT_DMA_BUG))
7663 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7664 return 0;
7665 #else
7666 return 0;
7667 #endif
7668 }
7669
7670 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7671 dma_addr_t mapping, u32 len, u32 flags,
7672 u32 mss, u32 vlan)
7673 {
7674 txbd->addr_hi = ((u64) mapping >> 32);
7675 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7676 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7677 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7678 }
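/* Layout note: a send BD is four 32-bit words. The 64-bit DMA
 * address is split across addr_hi/addr_lo, len_flags carries the
 * length above the 16 flag bits, and vlan_tag packs the MSS and
 * VLAN tag at their respective shifts.
 */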
7679
7680 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7681 dma_addr_t map, u32 len, u32 flags,
7682 u32 mss, u32 vlan)
7683 {
7684 struct tg3 *tp = tnapi->tp;
7685 bool hwbug = false;
7686
7687 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688 hwbug = true;
7689
7690 if (tg3_4g_overflow_test(map, len))
7691 hwbug = true;
7692
7693 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694 hwbug = true;
7695
7696 if (tg3_40bit_overflow_test(tp, map, len))
7697 hwbug = true;
7698
7699 if (tp->dma_limit) {
7700 u32 prvidx = *entry;
7701 u32 tmp_flag = flags & ~TXD_FLAG_END;
7702 while (len > tp->dma_limit && *budget) {
7703 u32 frag_len = tp->dma_limit;
7704 len -= tp->dma_limit;
7705
7706 			/* Avoid the 8-byte DMA problem */
7707 if (len <= 8) {
7708 len += tp->dma_limit / 2;
7709 frag_len = tp->dma_limit / 2;
7710 }
7711
7712 tnapi->tx_buffers[*entry].fragmented = true;
7713
7714 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7715 frag_len, tmp_flag, mss, vlan);
7716 *budget -= 1;
7717 prvidx = *entry;
7718 *entry = NEXT_TX(*entry);
7719
7720 map += frag_len;
7721 }
7722
7723 if (len) {
7724 if (*budget) {
7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726 len, flags, mss, vlan);
7727 *budget -= 1;
7728 *entry = NEXT_TX(*entry);
7729 } else {
7730 hwbug = true;
7731 tnapi->tx_buffers[prvidx].fragmented = false;
7732 }
7733 }
7734 } else {
7735 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736 len, flags, mss, vlan);
7737 *entry = NEXT_TX(*entry);
7738 }
7739
7740 return hwbug;
7741 }
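/* The dma_limit loop above slices one mapping into BDs of at most
 * tp->dma_limit bytes and sidesteps the short-DMA erratum: whenever
 * the remaining tail would be 8 bytes or less, the current slice is
 * halved so the tail stays safely above that limit.
 */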
7742
7743 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744 {
7745 int i;
7746 struct sk_buff *skb;
7747 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7748
7749 skb = txb->skb;
7750 txb->skb = NULL;
7751
7752 pci_unmap_single(tnapi->tp->pdev,
7753 dma_unmap_addr(txb, mapping),
7754 skb_headlen(skb),
7755 PCI_DMA_TODEVICE);
7756
7757 while (txb->fragmented) {
7758 txb->fragmented = false;
7759 entry = NEXT_TX(entry);
7760 txb = &tnapi->tx_buffers[entry];
7761 }
7762
7763 for (i = 0; i <= last; i++) {
7764 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7765
7766 entry = NEXT_TX(entry);
7767 txb = &tnapi->tx_buffers[entry];
7768
7769 pci_unmap_page(tnapi->tp->pdev,
7770 dma_unmap_addr(txb, mapping),
7771 skb_frag_size(frag), PCI_DMA_TODEVICE);
7772
7773 while (txb->fragmented) {
7774 txb->fragmented = false;
7775 entry = NEXT_TX(entry);
7776 txb = &tnapi->tx_buffers[entry];
7777 }
7778 }
7779 }
7780
7781 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7782 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783 struct sk_buff **pskb,
7784 u32 *entry, u32 *budget,
7785 u32 base_flags, u32 mss, u32 vlan)
7786 {
7787 struct tg3 *tp = tnapi->tp;
7788 struct sk_buff *new_skb, *skb = *pskb;
7789 dma_addr_t new_addr = 0;
7790 int ret = 0;
7791
7792 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793 new_skb = skb_copy(skb, GFP_ATOMIC);
7794 else {
7795 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7796
7797 new_skb = skb_copy_expand(skb,
7798 skb_headroom(skb) + more_headroom,
7799 skb_tailroom(skb), GFP_ATOMIC);
7800 }
7801
7802 if (!new_skb) {
7803 ret = -1;
7804 } else {
7805 /* New SKB is guaranteed to be linear. */
7806 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7807 PCI_DMA_TODEVICE);
7808 /* Make sure the mapping succeeded */
7809 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7810 dev_kfree_skb_any(new_skb);
7811 ret = -1;
7812 } else {
7813 u32 save_entry = *entry;
7814
7815 base_flags |= TXD_FLAG_END;
7816
7817 tnapi->tx_buffers[*entry].skb = new_skb;
7818 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819 mapping, new_addr);
7820
7821 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822 new_skb->len, base_flags,
7823 mss, vlan)) {
7824 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825 dev_kfree_skb_any(new_skb);
7826 ret = -1;
7827 }
7828 }
7829 }
7830
7831 dev_kfree_skb_any(skb);
7832 *pskb = new_skb;
7833 return ret;
7834 }
7835
7836 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7837 {
7838 /* Check if we will never have enough descriptors,
7839 	 * as gso_segs can be more than the current ring size
7840 */
7841 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7842 }
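/* The divide by three mirrors the worst-case estimate used in
 * tg3_tso_bug() below (gso_segs * 3 descriptors), so segmentation is
 * attempted only when the ring could ever hold the result.
 */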
7843
7844 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7845
7846 /* Use GSO to work around all TSO packets that meet HW bug conditions
7847 * indicated in tg3_tx_frag_set()
7848 */
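/* Sketch of the fallback: TSO is masked out of the feature set so
 * skb_gso_segment() performs software GSO, and each resulting
 * MTU-sized skb is resubmitted through tg3_start_xmit() as an
 * ordinary, non-TSO frame.
 */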
7849 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7850 struct netdev_queue *txq, struct sk_buff *skb)
7851 {
7852 struct sk_buff *segs, *nskb;
7853 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7854
7855 /* Estimate the number of fragments in the worst case */
7856 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7857 netif_tx_stop_queue(txq);
7858
7859 /* netif_tx_stop_queue() must be done before checking
7860 		 * the tx index in tg3_tx_avail() below, because in
7861 * tg3_tx(), we update tx index before checking for
7862 * netif_tx_queue_stopped().
7863 */
7864 smp_mb();
7865 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7866 return NETDEV_TX_BUSY;
7867
7868 netif_tx_wake_queue(txq);
7869 }
7870
7871 segs = skb_gso_segment(skb, tp->dev->features &
7872 ~(NETIF_F_TSO | NETIF_F_TSO6));
7873 if (IS_ERR(segs) || !segs)
7874 goto tg3_tso_bug_end;
7875
7876 do {
7877 nskb = segs;
7878 segs = segs->next;
7879 nskb->next = NULL;
7880 tg3_start_xmit(nskb, tp->dev);
7881 } while (segs);
7882
7883 tg3_tso_bug_end:
7884 dev_kfree_skb_any(skb);
7885
7886 return NETDEV_TX_OK;
7887 }
7888
7889 /* hard_start_xmit for all devices */
7890 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7891 {
7892 struct tg3 *tp = netdev_priv(dev);
7893 u32 len, entry, base_flags, mss, vlan = 0;
7894 u32 budget;
7895 int i = -1, would_hit_hwbug;
7896 dma_addr_t mapping;
7897 struct tg3_napi *tnapi;
7898 struct netdev_queue *txq;
7899 unsigned int last;
7900 struct iphdr *iph = NULL;
7901 struct tcphdr *tcph = NULL;
7902 __sum16 tcp_csum = 0, ip_csum = 0;
7903 __be16 ip_tot_len = 0;
7904
7905 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7906 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7907 if (tg3_flag(tp, ENABLE_TSS))
7908 tnapi++;
7909
7910 budget = tg3_tx_avail(tnapi);
7911
7912 /* We are running in BH disabled context with netif_tx_lock
7913 * and TX reclaim runs via tp->napi.poll inside of a software
7914 * interrupt. Furthermore, IRQ processing runs lockless so we have
7915 * no IRQ context deadlocks to worry about either. Rejoice!
7916 */
7917 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7918 if (!netif_tx_queue_stopped(txq)) {
7919 netif_tx_stop_queue(txq);
7920
7921 /* This is a hard error, log it. */
7922 netdev_err(dev,
7923 "BUG! Tx Ring full when queue awake!\n");
7924 }
7925 return NETDEV_TX_BUSY;
7926 }
7927
7928 entry = tnapi->tx_prod;
7929 base_flags = 0;
7930
7931 mss = skb_shinfo(skb)->gso_size;
7932 if (mss) {
7933 u32 tcp_opt_len, hdr_len;
7934
7935 if (skb_cow_head(skb, 0))
7936 goto drop;
7937
7938 iph = ip_hdr(skb);
7939 tcp_opt_len = tcp_optlen(skb);
7940
7941 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7942
7943 		/* HW/FW cannot correctly segment packets that have been
7944 * vlan encapsulated.
7945 */
7946 if (skb->protocol == htons(ETH_P_8021Q) ||
7947 skb->protocol == htons(ETH_P_8021AD)) {
7948 if (tg3_tso_bug_gso_check(tnapi, skb))
7949 return tg3_tso_bug(tp, tnapi, txq, skb);
7950 goto drop;
7951 }
7952
7953 if (!skb_is_gso_v6(skb)) {
7954 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7955 tg3_flag(tp, TSO_BUG)) {
7956 if (tg3_tso_bug_gso_check(tnapi, skb))
7957 return tg3_tso_bug(tp, tnapi, txq, skb);
7958 goto drop;
7959 }
7960 ip_csum = iph->check;
7961 ip_tot_len = iph->tot_len;
7962 iph->check = 0;
7963 iph->tot_len = htons(mss + hdr_len);
7964 }
7965
7966 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7967 TXD_FLAG_CPU_POST_DMA);
7968
7969 tcph = tcp_hdr(skb);
7970 tcp_csum = tcph->check;
7971
7972 if (tg3_flag(tp, HW_TSO_1) ||
7973 tg3_flag(tp, HW_TSO_2) ||
7974 tg3_flag(tp, HW_TSO_3)) {
7975 tcph->check = 0;
7976 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7977 } else {
7978 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7979 0, IPPROTO_TCP, 0);
7980 }
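		/* Without full HW TSO, the stack-computed checksum is
		 * replaced by just the pseudo-header sum (computed with
		 * length zero); the chip fixes up each segment itself.
		 */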
7981
7982 if (tg3_flag(tp, HW_TSO_3)) {
7983 mss |= (hdr_len & 0xc) << 12;
7984 if (hdr_len & 0x10)
7985 base_flags |= 0x00000010;
7986 base_flags |= (hdr_len & 0x3e0) << 5;
7987 } else if (tg3_flag(tp, HW_TSO_2))
7988 mss |= hdr_len << 9;
7989 else if (tg3_flag(tp, HW_TSO_1) ||
7990 tg3_asic_rev(tp) == ASIC_REV_5705) {
7991 if (tcp_opt_len || iph->ihl > 5) {
7992 int tsflags;
7993
7994 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7995 mss |= (tsflags << 11);
7996 }
7997 } else {
7998 if (tcp_opt_len || iph->ihl > 5) {
7999 int tsflags;
8000
8001 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002 base_flags |= tsflags << 12;
8003 }
8004 }
8005 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8006 		/* HW/FW cannot correctly checksum packets that have been
8007 * vlan encapsulated.
8008 */
8009 if (skb->protocol == htons(ETH_P_8021Q) ||
8010 skb->protocol == htons(ETH_P_8021AD)) {
8011 if (skb_checksum_help(skb))
8012 goto drop;
8013 } else {
8014 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8015 }
8016 }
8017
8018 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8019 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8020 base_flags |= TXD_FLAG_JMB_PKT;
8021
8022 if (skb_vlan_tag_present(skb)) {
8023 base_flags |= TXD_FLAG_VLAN;
8024 vlan = skb_vlan_tag_get(skb);
8025 }
8026
8027 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8028 tg3_flag(tp, TX_TSTAMP_EN)) {
8029 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8030 base_flags |= TXD_FLAG_HWTSTAMP;
8031 }
8032
8033 len = skb_headlen(skb);
8034
8035 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8036 if (pci_dma_mapping_error(tp->pdev, mapping))
8037 goto drop;
8038
8039
8040 tnapi->tx_buffers[entry].skb = skb;
8041 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8042
8043 would_hit_hwbug = 0;
8044
8045 if (tg3_flag(tp, 5701_DMA_BUG))
8046 would_hit_hwbug = 1;
8047
8048 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8049 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8050 mss, vlan)) {
8051 would_hit_hwbug = 1;
8052 } else if (skb_shinfo(skb)->nr_frags > 0) {
8053 u32 tmp_mss = mss;
8054
8055 if (!tg3_flag(tp, HW_TSO_1) &&
8056 !tg3_flag(tp, HW_TSO_2) &&
8057 !tg3_flag(tp, HW_TSO_3))
8058 tmp_mss = 0;
8059
8060 /* Now loop through additional data
8061 * fragments, and queue them.
8062 */
8063 last = skb_shinfo(skb)->nr_frags - 1;
8064 for (i = 0; i <= last; i++) {
8065 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8066
8067 len = skb_frag_size(frag);
8068 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8069 len, DMA_TO_DEVICE);
8070
8071 tnapi->tx_buffers[entry].skb = NULL;
8072 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8073 mapping);
8074 if (dma_mapping_error(&tp->pdev->dev, mapping))
8075 goto dma_error;
8076
8077 if (!budget ||
8078 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8079 len, base_flags |
8080 ((i == last) ? TXD_FLAG_END : 0),
8081 tmp_mss, vlan)) {
8082 would_hit_hwbug = 1;
8083 break;
8084 }
8085 }
8086 }
8087
8088 if (would_hit_hwbug) {
8089 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8090
8091 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8092 /* If it's a TSO packet, do GSO instead of
8093 * allocating and copying to a large linear SKB
8094 */
8095 if (ip_tot_len) {
8096 iph->check = ip_csum;
8097 iph->tot_len = ip_tot_len;
8098 }
8099 tcph->check = tcp_csum;
8100 return tg3_tso_bug(tp, tnapi, txq, skb);
8101 }
8102
8103 /* If the workaround fails due to memory/mapping
8104 * failure, silently drop this packet.
8105 */
8106 entry = tnapi->tx_prod;
8107 budget = tg3_tx_avail(tnapi);
8108 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8109 base_flags, mss, vlan))
8110 goto drop_nofree;
8111 }
8112
8113 skb_tx_timestamp(skb);
8114 netdev_tx_sent_queue(txq, skb->len);
8115
8116 /* Sync BD data before updating mailbox */
8117 wmb();
8118
8119 tnapi->tx_prod = entry;
8120 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8121 netif_tx_stop_queue(txq);
8122
8123 /* netif_tx_stop_queue() must be done before checking
8124 		 * the tx index in tg3_tx_avail() below, because in
8125 * tg3_tx(), we update tx index before checking for
8126 * netif_tx_queue_stopped().
8127 */
8128 smp_mb();
8129 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8130 netif_tx_wake_queue(txq);
8131 }
8132
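	/* Doorbell batching: while skb->xmit_more promises that more
	 * packets follow immediately, the producer-index MMIO write is
	 * deferred so one write can flush a whole burst to the NIC.
	 */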
8133 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8134 /* Packets are ready, update Tx producer idx on card. */
8135 tw32_tx_mbox(tnapi->prodmbox, entry);
8136 mmiowb();
8137 }
8138
8139 return NETDEV_TX_OK;
8140
8141 dma_error:
8142 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8143 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8144 drop:
8145 dev_kfree_skb_any(skb);
8146 drop_nofree:
8147 tp->tx_dropped++;
8148 return NETDEV_TX_OK;
8149 }
8150
8151 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8152 {
8153 if (enable) {
8154 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8155 MAC_MODE_PORT_MODE_MASK);
8156
8157 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8158
8159 if (!tg3_flag(tp, 5705_PLUS))
8160 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8161
8162 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8163 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8164 else
8165 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8166 } else {
8167 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8168
8169 if (tg3_flag(tp, 5705_PLUS) ||
8170 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8171 tg3_asic_rev(tp) == ASIC_REV_5700)
8172 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8173 }
8174
8175 tw32(MAC_MODE, tp->mac_mode);
8176 udelay(40);
8177 }
8178
8179 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8180 {
8181 u32 val, bmcr, mac_mode, ptest = 0;
8182
8183 tg3_phy_toggle_apd(tp, false);
8184 tg3_phy_toggle_automdix(tp, false);
8185
8186 if (extlpbk && tg3_phy_set_extloopbk(tp))
8187 return -EIO;
8188
8189 bmcr = BMCR_FULLDPLX;
8190 switch (speed) {
8191 case SPEED_10:
8192 break;
8193 case SPEED_100:
8194 bmcr |= BMCR_SPEED100;
8195 break;
8196 case SPEED_1000:
8197 default:
8198 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8199 speed = SPEED_100;
8200 bmcr |= BMCR_SPEED100;
8201 } else {
8202 speed = SPEED_1000;
8203 bmcr |= BMCR_SPEED1000;
8204 }
8205 }
8206
8207 if (extlpbk) {
8208 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8209 tg3_readphy(tp, MII_CTRL1000, &val);
8210 val |= CTL1000_AS_MASTER |
8211 CTL1000_ENABLE_MASTER;
8212 tg3_writephy(tp, MII_CTRL1000, val);
8213 } else {
8214 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8215 MII_TG3_FET_PTEST_TRIM_2;
8216 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8217 }
8218 } else
8219 bmcr |= BMCR_LOOPBACK;
8220
8221 tg3_writephy(tp, MII_BMCR, bmcr);
8222
8223 /* The write needs to be flushed for the FETs */
8224 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8225 tg3_readphy(tp, MII_BMCR, &bmcr);
8226
8227 udelay(40);
8228
8229 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8230 tg3_asic_rev(tp) == ASIC_REV_5785) {
8231 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8232 MII_TG3_FET_PTEST_FRC_TX_LINK |
8233 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8234
8235 /* The write needs to be flushed for the AC131 */
8236 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8237 }
8238
8239 /* Reset to prevent losing 1st rx packet intermittently */
8240 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8241 tg3_flag(tp, 5780_CLASS)) {
8242 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8243 udelay(10);
8244 tw32_f(MAC_RX_MODE, tp->rx_mode);
8245 }
8246
8247 mac_mode = tp->mac_mode &
8248 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8249 if (speed == SPEED_1000)
8250 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8251 else
8252 mac_mode |= MAC_MODE_PORT_MODE_MII;
8253
8254 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8255 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8256
8257 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8258 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8259 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8260 mac_mode |= MAC_MODE_LINK_POLARITY;
8261
8262 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8263 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8264 }
8265
8266 tw32(MAC_MODE, mac_mode);
8267 udelay(40);
8268
8269 return 0;
8270 }
8271
8272 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8273 {
8274 struct tg3 *tp = netdev_priv(dev);
8275
8276 if (features & NETIF_F_LOOPBACK) {
8277 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8278 return;
8279
8280 spin_lock_bh(&tp->lock);
8281 tg3_mac_loopback(tp, true);
8282 netif_carrier_on(tp->dev);
8283 spin_unlock_bh(&tp->lock);
8284 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8285 } else {
8286 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8287 return;
8288
8289 spin_lock_bh(&tp->lock);
8290 tg3_mac_loopback(tp, false);
8291 /* Force link status check */
8292 tg3_setup_phy(tp, true);
8293 spin_unlock_bh(&tp->lock);
8294 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8295 }
8296 }
8297
8298 static netdev_features_t tg3_fix_features(struct net_device *dev,
8299 netdev_features_t features)
8300 {
8301 struct tg3 *tp = netdev_priv(dev);
8302
8303 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8304 features &= ~NETIF_F_ALL_TSO;
8305
8306 return features;
8307 }
8308
8309 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8310 {
8311 netdev_features_t changed = dev->features ^ features;
8312
8313 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8314 tg3_set_loopback(dev, features);
8315
8316 return 0;
8317 }
8318
8319 static void tg3_rx_prodring_free(struct tg3 *tp,
8320 struct tg3_rx_prodring_set *tpr)
8321 {
8322 int i;
8323
8324 if (tpr != &tp->napi[0].prodring) {
8325 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8326 i = (i + 1) & tp->rx_std_ring_mask)
8327 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8328 tp->rx_pkt_map_sz);
8329
8330 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8331 for (i = tpr->rx_jmb_cons_idx;
8332 i != tpr->rx_jmb_prod_idx;
8333 i = (i + 1) & tp->rx_jmb_ring_mask) {
8334 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8335 TG3_RX_JMB_MAP_SZ);
8336 }
8337 }
8338
8339 return;
8340 }
8341
8342 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8343 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344 tp->rx_pkt_map_sz);
8345
8346 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8347 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8348 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8349 TG3_RX_JMB_MAP_SZ);
8350 }
8351 }
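/* Note on the two paths above: a per-vector prodring only owns
 * buffers in its consumer-to-producer window, so just that span is
 * walked; the vector 0 ring is the true hardware ring and is swept
 * in full.
 */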
8352
8353 /* Initialize rx rings for packet processing.
8354 *
8355 * The chip has been shut down and the driver detached from
8356  * the network stack, so no interrupts or new tx packets will
8357 * end up in the driver. tp->{tx,}lock are held and thus
8358 * we may not sleep.
8359 */
8360 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8361 struct tg3_rx_prodring_set *tpr)
8362 {
8363 u32 i, rx_pkt_dma_sz;
8364
8365 tpr->rx_std_cons_idx = 0;
8366 tpr->rx_std_prod_idx = 0;
8367 tpr->rx_jmb_cons_idx = 0;
8368 tpr->rx_jmb_prod_idx = 0;
8369
8370 if (tpr != &tp->napi[0].prodring) {
8371 memset(&tpr->rx_std_buffers[0], 0,
8372 TG3_RX_STD_BUFF_RING_SIZE(tp));
8373 if (tpr->rx_jmb_buffers)
8374 memset(&tpr->rx_jmb_buffers[0], 0,
8375 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8376 goto done;
8377 }
8378
8379 /* Zero out all descriptors. */
8380 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8381
8382 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8383 if (tg3_flag(tp, 5780_CLASS) &&
8384 tp->dev->mtu > ETH_DATA_LEN)
8385 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8386 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8387
8388 	/* Initialize invariants of the rings; we only set this
8389 * stuff once. This works because the card does not
8390 * write into the rx buffer posting rings.
8391 */
8392 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8393 struct tg3_rx_buffer_desc *rxd;
8394
8395 rxd = &tpr->rx_std[i];
8396 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8397 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8398 rxd->opaque = (RXD_OPAQUE_RING_STD |
8399 (i << RXD_OPAQUE_INDEX_SHIFT));
8400 }
8401
8402 /* Now allocate fresh SKBs for each rx ring. */
8403 for (i = 0; i < tp->rx_pending; i++) {
8404 unsigned int frag_size;
8405
8406 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8407 &frag_size) < 0) {
8408 netdev_warn(tp->dev,
8409 "Using a smaller RX standard ring. Only "
8410 "%d out of %d buffers were allocated "
8411 "successfully\n", i, tp->rx_pending);
8412 if (i == 0)
8413 goto initfail;
8414 tp->rx_pending = i;
8415 break;
8416 }
8417 }
8418
8419 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8420 goto done;
8421
8422 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8423
8424 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8425 goto done;
8426
8427 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8428 struct tg3_rx_buffer_desc *rxd;
8429
8430 rxd = &tpr->rx_jmb[i].std;
8431 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8432 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8433 RXD_FLAG_JUMBO;
8434 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8435 (i << RXD_OPAQUE_INDEX_SHIFT));
8436 }
8437
8438 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8439 unsigned int frag_size;
8440
8441 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8442 &frag_size) < 0) {
8443 netdev_warn(tp->dev,
8444 "Using a smaller RX jumbo ring. Only %d "
8445 "out of %d buffers were allocated "
8446 "successfully\n", i, tp->rx_jumbo_pending);
8447 if (i == 0)
8448 goto initfail;
8449 tp->rx_jumbo_pending = i;
8450 break;
8451 }
8452 }
8453
8454 done:
8455 return 0;
8456
8457 initfail:
8458 tg3_rx_prodring_free(tp, tpr);
8459 return -ENOMEM;
8460 }
8461
8462 static void tg3_rx_prodring_fini(struct tg3 *tp,
8463 struct tg3_rx_prodring_set *tpr)
8464 {
8465 kfree(tpr->rx_std_buffers);
8466 tpr->rx_std_buffers = NULL;
8467 kfree(tpr->rx_jmb_buffers);
8468 tpr->rx_jmb_buffers = NULL;
8469 if (tpr->rx_std) {
8470 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8471 tpr->rx_std, tpr->rx_std_mapping);
8472 tpr->rx_std = NULL;
8473 }
8474 if (tpr->rx_jmb) {
8475 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8476 tpr->rx_jmb, tpr->rx_jmb_mapping);
8477 tpr->rx_jmb = NULL;
8478 }
8479 }
8480
8481 static int tg3_rx_prodring_init(struct tg3 *tp,
8482 struct tg3_rx_prodring_set *tpr)
8483 {
8484 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8485 GFP_KERNEL);
8486 if (!tpr->rx_std_buffers)
8487 return -ENOMEM;
8488
8489 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8490 TG3_RX_STD_RING_BYTES(tp),
8491 &tpr->rx_std_mapping,
8492 GFP_KERNEL);
8493 if (!tpr->rx_std)
8494 goto err_out;
8495
8496 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8497 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8498 GFP_KERNEL);
8499 if (!tpr->rx_jmb_buffers)
8500 goto err_out;
8501
8502 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8503 TG3_RX_JMB_RING_BYTES(tp),
8504 &tpr->rx_jmb_mapping,
8505 GFP_KERNEL);
8506 if (!tpr->rx_jmb)
8507 goto err_out;
8508 }
8509
8510 return 0;
8511
8512 err_out:
8513 tg3_rx_prodring_fini(tp, tpr);
8514 return -ENOMEM;
8515 }
8516
8517 /* Free up pending packets in all rx/tx rings.
8518 *
8519 * The chip has been shut down and the driver detached from
8520  * the network stack, so no interrupts or new tx packets will
8521 * end up in the driver. tp->{tx,}lock is not held and we are not
8522 * in an interrupt context and thus may sleep.
8523 */
8524 static void tg3_free_rings(struct tg3 *tp)
8525 {
8526 int i, j;
8527
8528 for (j = 0; j < tp->irq_cnt; j++) {
8529 struct tg3_napi *tnapi = &tp->napi[j];
8530
8531 tg3_rx_prodring_free(tp, &tnapi->prodring);
8532
8533 if (!tnapi->tx_buffers)
8534 continue;
8535
8536 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8537 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8538
8539 if (!skb)
8540 continue;
8541
8542 tg3_tx_skb_unmap(tnapi, i,
8543 skb_shinfo(skb)->nr_frags - 1);
8544
8545 dev_kfree_skb_any(skb);
8546 }
8547 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8548 }
8549 }
8550
8551 /* Initialize tx/rx rings for packet processing.
8552 *
8553 * The chip has been shut down and the driver detached from
8554  * the network stack, so no interrupts or new tx packets will
8555 * end up in the driver. tp->{tx,}lock are held and thus
8556 * we may not sleep.
8557 */
8558 static int tg3_init_rings(struct tg3 *tp)
8559 {
8560 int i;
8561
8562 /* Free up all the SKBs. */
8563 tg3_free_rings(tp);
8564
8565 for (i = 0; i < tp->irq_cnt; i++) {
8566 struct tg3_napi *tnapi = &tp->napi[i];
8567
8568 tnapi->last_tag = 0;
8569 tnapi->last_irq_tag = 0;
8570 tnapi->hw_status->status = 0;
8571 tnapi->hw_status->status_tag = 0;
8572 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8573
8574 tnapi->tx_prod = 0;
8575 tnapi->tx_cons = 0;
8576 if (tnapi->tx_ring)
8577 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8578
8579 tnapi->rx_rcb_ptr = 0;
8580 if (tnapi->rx_rcb)
8581 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8582
8583 if (tnapi->prodring.rx_std &&
8584 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8585 tg3_free_rings(tp);
8586 return -ENOMEM;
8587 }
8588 }
8589
8590 return 0;
8591 }
8592
8593 static void tg3_mem_tx_release(struct tg3 *tp)
8594 {
8595 int i;
8596
8597 for (i = 0; i < tp->irq_max; i++) {
8598 struct tg3_napi *tnapi = &tp->napi[i];
8599
8600 if (tnapi->tx_ring) {
8601 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8602 tnapi->tx_ring, tnapi->tx_desc_mapping);
8603 tnapi->tx_ring = NULL;
8604 }
8605
8606 kfree(tnapi->tx_buffers);
8607 tnapi->tx_buffers = NULL;
8608 }
8609 }
8610
8611 static int tg3_mem_tx_acquire(struct tg3 *tp)
8612 {
8613 int i;
8614 struct tg3_napi *tnapi = &tp->napi[0];
8615
8616 /* If multivector TSS is enabled, vector 0 does not handle
8617 * tx interrupts. Don't allocate any resources for it.
8618 */
8619 if (tg3_flag(tp, ENABLE_TSS))
8620 tnapi++;
8621
8622 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8623 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8624 TG3_TX_RING_SIZE, GFP_KERNEL);
8625 if (!tnapi->tx_buffers)
8626 goto err_out;
8627
8628 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8629 TG3_TX_RING_BYTES,
8630 &tnapi->tx_desc_mapping,
8631 GFP_KERNEL);
8632 if (!tnapi->tx_ring)
8633 goto err_out;
8634 }
8635
8636 return 0;
8637
8638 err_out:
8639 tg3_mem_tx_release(tp);
8640 return -ENOMEM;
8641 }
8642
8643 static void tg3_mem_rx_release(struct tg3 *tp)
8644 {
8645 int i;
8646
8647 for (i = 0; i < tp->irq_max; i++) {
8648 struct tg3_napi *tnapi = &tp->napi[i];
8649
8650 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8651
8652 if (!tnapi->rx_rcb)
8653 continue;
8654
8655 dma_free_coherent(&tp->pdev->dev,
8656 TG3_RX_RCB_RING_BYTES(tp),
8657 tnapi->rx_rcb,
8658 tnapi->rx_rcb_mapping);
8659 tnapi->rx_rcb = NULL;
8660 }
8661 }
8662
8663 static int tg3_mem_rx_acquire(struct tg3 *tp)
8664 {
8665 unsigned int i, limit;
8666
8667 limit = tp->rxq_cnt;
8668
8669 /* If RSS is enabled, we need a (dummy) producer ring
8670 * set on vector zero. This is the true hw prodring.
8671 */
8672 if (tg3_flag(tp, ENABLE_RSS))
8673 limit++;
8674
8675 for (i = 0; i < limit; i++) {
8676 struct tg3_napi *tnapi = &tp->napi[i];
8677
8678 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8679 goto err_out;
8680
8681 /* If multivector RSS is enabled, vector 0
8682 * does not handle rx or tx interrupts.
8683 * Don't allocate any resources for it.
8684 */
8685 if (!i && tg3_flag(tp, ENABLE_RSS))
8686 continue;
8687
8688 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8689 TG3_RX_RCB_RING_BYTES(tp),
8690 &tnapi->rx_rcb_mapping,
8691 GFP_KERNEL);
8692 if (!tnapi->rx_rcb)
8693 goto err_out;
8694 }
8695
8696 return 0;
8697
8698 err_out:
8699 tg3_mem_rx_release(tp);
8700 return -ENOMEM;
8701 }
8702
8703 /*
8704 * Must not be invoked with interrupt sources disabled and
8705  * the hardware shut down.
8706 */
8707 static void tg3_free_consistent(struct tg3 *tp)
8708 {
8709 int i;
8710
8711 for (i = 0; i < tp->irq_cnt; i++) {
8712 struct tg3_napi *tnapi = &tp->napi[i];
8713
8714 if (tnapi->hw_status) {
8715 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8716 tnapi->hw_status,
8717 tnapi->status_mapping);
8718 tnapi->hw_status = NULL;
8719 }
8720 }
8721
8722 tg3_mem_rx_release(tp);
8723 tg3_mem_tx_release(tp);
8724
8725 /* tp->hw_stats can be referenced safely:
8726 * 1. under rtnl_lock
8727 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8728 */
8729 if (tp->hw_stats) {
8730 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8731 tp->hw_stats, tp->stats_mapping);
8732 tp->hw_stats = NULL;
8733 }
8734 }
8735
8736 /*
8737 * Must not be invoked with interrupt sources disabled and
8738  * the hardware shut down. Can sleep.
8739 */
8740 static int tg3_alloc_consistent(struct tg3 *tp)
8741 {
8742 int i;
8743
8744 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8745 sizeof(struct tg3_hw_stats),
8746 &tp->stats_mapping, GFP_KERNEL);
8747 if (!tp->hw_stats)
8748 goto err_out;
8749
8750 for (i = 0; i < tp->irq_cnt; i++) {
8751 struct tg3_napi *tnapi = &tp->napi[i];
8752 struct tg3_hw_status *sblk;
8753
8754 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8755 TG3_HW_STATUS_SIZE,
8756 &tnapi->status_mapping,
8757 GFP_KERNEL);
8758 if (!tnapi->hw_status)
8759 goto err_out;
8760
8761 sblk = tnapi->hw_status;
8762
8763 if (tg3_flag(tp, ENABLE_RSS)) {
8764 u16 *prodptr = NULL;
8765
8766 /*
8767 * When RSS is enabled, the status block format changes
8768 * slightly. The "rx_jumbo_consumer", "reserved",
8769 * and "rx_mini_consumer" members get mapped to the
8770 * other three rx return ring producer indexes.
8771 */
8772 switch (i) {
8773 case 1:
8774 prodptr = &sblk->idx[0].rx_producer;
8775 break;
8776 case 2:
8777 prodptr = &sblk->rx_jumbo_consumer;
8778 break;
8779 case 3:
8780 prodptr = &sblk->reserved;
8781 break;
8782 case 4:
8783 prodptr = &sblk->rx_mini_consumer;
8784 break;
8785 }
8786 tnapi->rx_rcb_prod_idx = prodptr;
8787 } else {
8788 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8789 }
8790 }
8791
8792 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8793 goto err_out;
8794
8795 return 0;
8796
8797 err_out:
8798 tg3_free_consistent(tp);
8799 return -ENOMEM;
8800 }
8801
8802 #define MAX_WAIT_CNT 1000
8803
8804 /* To stop a block, clear the enable bit and poll till it
8805 * clears. tp->lock is held.
8806 */
8807 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8808 {
8809 unsigned int i;
8810 u32 val;
8811
8812 if (tg3_flag(tp, 5705_PLUS)) {
8813 switch (ofs) {
8814 case RCVLSC_MODE:
8815 case DMAC_MODE:
8816 case MBFREE_MODE:
8817 case BUFMGR_MODE:
8818 case MEMARB_MODE:
8819 /* We can't enable/disable these bits of the
8820 			 * 5705/5750, so just report success.
8821 */
8822 return 0;
8823
8824 default:
8825 break;
8826 }
8827 }
8828
8829 val = tr32(ofs);
8830 val &= ~enable_bit;
8831 tw32_f(ofs, val);
8832
8833 for (i = 0; i < MAX_WAIT_CNT; i++) {
8834 if (pci_channel_offline(tp->pdev)) {
8835 dev_err(&tp->pdev->dev,
8836 "tg3_stop_block device offline, "
8837 "ofs=%lx enable_bit=%x\n",
8838 ofs, enable_bit);
8839 return -ENODEV;
8840 }
8841
8842 udelay(100);
8843 val = tr32(ofs);
8844 if ((val & enable_bit) == 0)
8845 break;
8846 }
8847
8848 if (i == MAX_WAIT_CNT && !silent) {
8849 dev_err(&tp->pdev->dev,
8850 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8851 ofs, enable_bit);
8852 return -ENODEV;
8853 }
8854
8855 return 0;
8856 }
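/* Polling math: MAX_WAIT_CNT iterations of udelay(100) give each
 * block roughly 100ms to quiesce before -ENODEV is reported.
 */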
8857
8858 /* tp->lock is held. */
8859 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8860 {
8861 int i, err;
8862
8863 tg3_disable_ints(tp);
8864
8865 if (pci_channel_offline(tp->pdev)) {
8866 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8867 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8868 err = -ENODEV;
8869 goto err_no_dev;
8870 }
8871
8872 tp->rx_mode &= ~RX_MODE_ENABLE;
8873 tw32_f(MAC_RX_MODE, tp->rx_mode);
8874 udelay(10);
8875
8876 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8877 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8882
8883 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8890
8891 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8892 tw32_f(MAC_MODE, tp->mac_mode);
8893 udelay(40);
8894
8895 tp->tx_mode &= ~TX_MODE_ENABLE;
8896 tw32_f(MAC_TX_MODE, tp->tx_mode);
8897
8898 for (i = 0; i < MAX_WAIT_CNT; i++) {
8899 udelay(100);
8900 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8901 break;
8902 }
8903 if (i >= MAX_WAIT_CNT) {
8904 dev_err(&tp->pdev->dev,
8905 "%s timed out, TX_MODE_ENABLE will not clear "
8906 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8907 err |= -ENODEV;
8908 }
8909
8910 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8911 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8912 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8913
8914 tw32(FTQ_RESET, 0xffffffff);
8915 tw32(FTQ_RESET, 0x00000000);
8916
8917 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8918 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8919
8920 err_no_dev:
8921 for (i = 0; i < tp->irq_cnt; i++) {
8922 struct tg3_napi *tnapi = &tp->napi[i];
8923 if (tnapi->hw_status)
8924 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8925 }
8926
8927 return err;
8928 }
8929
8930 /* Save PCI command register before chip reset */
8931 static void tg3_save_pci_state(struct tg3 *tp)
8932 {
8933 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8934 }
8935
8936 /* Restore PCI state after chip reset */
8937 static void tg3_restore_pci_state(struct tg3 *tp)
8938 {
8939 u32 val;
8940
8941 /* Re-enable indirect register accesses. */
8942 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8943 tp->misc_host_ctrl);
8944
8945 /* Set MAX PCI retry to zero. */
8946 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8947 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8948 tg3_flag(tp, PCIX_MODE))
8949 val |= PCISTATE_RETRY_SAME_DMA;
8950 /* Allow reads and writes to the APE register and memory space. */
8951 if (tg3_flag(tp, ENABLE_APE))
8952 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8953 PCISTATE_ALLOW_APE_SHMEM_WR |
8954 PCISTATE_ALLOW_APE_PSPACE_WR;
8955 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8956
8957 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8958
8959 if (!tg3_flag(tp, PCI_EXPRESS)) {
8960 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8961 tp->pci_cacheline_sz);
8962 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8963 tp->pci_lat_timer);
8964 }
8965
8966 /* Make sure PCI-X relaxed ordering bit is clear. */
8967 if (tg3_flag(tp, PCIX_MODE)) {
8968 u16 pcix_cmd;
8969
8970 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971 &pcix_cmd);
8972 pcix_cmd &= ~PCI_X_CMD_ERO;
8973 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8974 pcix_cmd);
8975 }
8976
8977 if (tg3_flag(tp, 5780_CLASS)) {
8978
8979 /* Chip reset on 5780 will reset MSI enable bit,
8980 * so need to restore it.
8981 */
8982 if (tg3_flag(tp, USING_MSI)) {
8983 u16 ctrl;
8984
8985 pci_read_config_word(tp->pdev,
8986 tp->msi_cap + PCI_MSI_FLAGS,
8987 &ctrl);
8988 pci_write_config_word(tp->pdev,
8989 tp->msi_cap + PCI_MSI_FLAGS,
8990 ctrl | PCI_MSI_FLAGS_ENABLE);
8991 val = tr32(MSGINT_MODE);
8992 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8993 }
8994 }
8995 }
8996
8997 static void tg3_override_clk(struct tg3 *tp)
8998 {
8999 u32 val;
9000
9001 switch (tg3_asic_rev(tp)) {
9002 case ASIC_REV_5717:
9003 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9004 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9005 TG3_CPMU_MAC_ORIDE_ENABLE);
9006 break;
9007
9008 case ASIC_REV_5719:
9009 case ASIC_REV_5720:
9010 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9011 break;
9012
9013 default:
9014 return;
9015 }
9016 }
9017
9018 static void tg3_restore_clk(struct tg3 *tp)
9019 {
9020 u32 val;
9021
9022 switch (tg3_asic_rev(tp)) {
9023 case ASIC_REV_5717:
9024 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9026 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9027 break;
9028
9029 case ASIC_REV_5719:
9030 case ASIC_REV_5720:
9031 val = tr32(TG3_CPMU_CLCK_ORIDE);
9032 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9033 break;
9034
9035 default:
9036 return;
9037 }
9038 }
9039
9040 /* tp->lock is held. */
9041 static int tg3_chip_reset(struct tg3 *tp)
9042 __releases(tp->lock)
9043 __acquires(tp->lock)
9044 {
9045 u32 val;
9046 void (*write_op)(struct tg3 *, u32, u32);
9047 int i, err;
9048
9049 if (!pci_device_is_present(tp->pdev))
9050 return -ENODEV;
9051
9052 tg3_nvram_lock(tp);
9053
9054 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9055
9056 /* No matching tg3_nvram_unlock() after this because
9057 * chip reset below will undo the nvram lock.
9058 */
9059 tp->nvram_lock_cnt = 0;
9060
9061 /* GRC_MISC_CFG core clock reset will clear the memory
9062 * enable bit in PCI register 4 and the MSI enable bit
9063 * on some chips, so we save relevant registers here.
9064 */
9065 tg3_save_pci_state(tp);
9066
9067 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9068 tg3_flag(tp, 5755_PLUS))
9069 tw32(GRC_FASTBOOT_PC, 0);
9070
9071 /*
9072 * We must avoid the readl() that normally takes place.
9073 * It locks machines, causes machine checks, and other
9074 * fun things. So, temporarily disable the 5701
9075 * hardware workaround, while we do the reset.
9076 */
9077 write_op = tp->write32;
9078 if (write_op == tg3_write_flush_reg32)
9079 tp->write32 = tg3_write32;
9080
9081 /* Prevent the irq handler from reading or writing PCI registers
9082 * during chip reset when the memory enable bit in the PCI command
9083 * register may be cleared. The chip does not generate interrupt
9084 * at this time, but the irq handler may still be called due to irq
9085 * sharing or irqpoll.
9086 */
9087 tg3_flag_set(tp, CHIP_RESETTING);
9088 for (i = 0; i < tp->irq_cnt; i++) {
9089 struct tg3_napi *tnapi = &tp->napi[i];
9090 if (tnapi->hw_status) {
9091 tnapi->hw_status->status = 0;
9092 tnapi->hw_status->status_tag = 0;
9093 }
9094 tnapi->last_tag = 0;
9095 tnapi->last_irq_tag = 0;
9096 }
9097 smp_mb();
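	/* The barrier above makes CHIP_RESETTING and the zeroed status
	 * blocks visible to the IRQ handlers before synchronize_irq()
	 * below waits out any handlers already in flight.
	 */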
9098
9099 tg3_full_unlock(tp);
9100
9101 for (i = 0; i < tp->irq_cnt; i++)
9102 synchronize_irq(tp->napi[i].irq_vec);
9103
9104 tg3_full_lock(tp, 0);
9105
9106 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9107 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9108 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9109 }
9110
9111 /* do the reset */
9112 val = GRC_MISC_CFG_CORECLK_RESET;
9113
9114 if (tg3_flag(tp, PCI_EXPRESS)) {
9115 /* Force PCIe 1.0a mode */
9116 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9117 !tg3_flag(tp, 57765_PLUS) &&
9118 tr32(TG3_PCIE_PHY_TSTCTL) ==
9119 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9120 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9121
9122 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9123 tw32(GRC_MISC_CFG, (1 << 29));
9124 val |= (1 << 29);
9125 }
9126 }
9127
9128 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9129 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9130 tw32(GRC_VCPU_EXT_CTRL,
9131 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9132 }
9133
9134 /* Set the clock to the highest frequency to avoid timeouts. With link
9135 * aware mode, the clock speed could be slow and bootcode does not
9136 * complete within the expected time. Override the clock to allow the
9137 * bootcode to finish sooner and then restore it.
9138 */
9139 tg3_override_clk(tp);
9140
9141 /* Manage gphy power for all CPMU absent PCIe devices. */
9142 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9143 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9144
9145 tw32(GRC_MISC_CFG, val);
9146
9147 /* restore 5701 hardware bug workaround write method */
9148 tp->write32 = write_op;
9149
9150 /* Unfortunately, we have to delay before the PCI read back.
9151 	 * Some 575X chips will not even respond to a PCI cfg access
9152 * when the reset command is given to the chip.
9153 *
9154 * How do these hardware designers expect things to work
9155 * properly if the PCI write is posted for a long period
9156 * of time? It is always necessary to have some method by
9157 	 * which a register read back can occur to push out the
9158 	 * write that does the reset.
9159 *
9160 * For most tg3 variants the trick below was working.
9161 * Ho hum...
9162 */
9163 udelay(120);
9164
9165 /* Flush PCI posted writes. The normal MMIO registers
9166 * are inaccessible at this time so this is the only
9167 	 * way to do this reliably (actually, this is no longer
9168 * the case, see above). I tried to use indirect
9169 * register read/write but this upset some 5701 variants.
9170 */
9171 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9172
9173 udelay(120);
9174
9175 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9176 u16 val16;
9177
9178 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9179 int j;
9180 u32 cfg_val;
9181
9182 /* Wait for link training to complete. */
9183 for (j = 0; j < 5000; j++)
9184 udelay(100);
9185
9186 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9187 pci_write_config_dword(tp->pdev, 0xc4,
9188 cfg_val | (1 << 15));
9189 }
9190
9191 /* Clear the "no snoop" and "relaxed ordering" bits. */
9192 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9193 /*
9194 		 * Older PCIe devices only support the 128-byte
9195 * MPS setting. Enforce the restriction.
9196 */
9197 if (!tg3_flag(tp, CPMU_PRESENT))
9198 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9199 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9200
9201 /* Clear error status */
9202 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9203 PCI_EXP_DEVSTA_CED |
9204 PCI_EXP_DEVSTA_NFED |
9205 PCI_EXP_DEVSTA_FED |
9206 PCI_EXP_DEVSTA_URD);
9207 }
9208
9209 tg3_restore_pci_state(tp);
9210
9211 tg3_flag_clear(tp, CHIP_RESETTING);
9212 tg3_flag_clear(tp, ERROR_PROCESSED);
9213
9214 val = 0;
9215 if (tg3_flag(tp, 5780_CLASS))
9216 val = tr32(MEMARB_MODE);
9217 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9218
9219 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9220 tg3_stop_fw(tp);
9221 tw32(0x5000, 0x400);
9222 }
9223
9224 if (tg3_flag(tp, IS_SSB_CORE)) {
9225 /*
9226 * BCM4785: In order to avoid repercussions from using
9227 * potentially defective internal ROM, stop the Rx RISC CPU,
9228 * which is not required.
9229 */
9230 tg3_stop_fw(tp);
9231 tg3_halt_cpu(tp, RX_CPU_BASE);
9232 }
9233
9234 err = tg3_poll_fw(tp);
9235 if (err)
9236 return err;
9237
9238 tw32(GRC_MODE, tp->grc_mode);
9239
9240 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9241 val = tr32(0xc4);
9242
9243 tw32(0xc4, val | (1 << 15));
9244 }
9245
9246 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9247 tg3_asic_rev(tp) == ASIC_REV_5705) {
9248 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9249 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9250 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9251 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9252 }
9253
9254 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9255 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9256 val = tp->mac_mode;
9257 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9258 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9259 val = tp->mac_mode;
9260 } else
9261 val = 0;
9262
9263 tw32_f(MAC_MODE, val);
9264 udelay(40);
9265
9266 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9267
9268 tg3_mdio_start(tp);
9269
9270 if (tg3_flag(tp, PCI_EXPRESS) &&
9271 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9272 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9273 !tg3_flag(tp, 57765_PLUS)) {
9274 val = tr32(0x7c00);
9275
9276 tw32(0x7c00, val | (1 << 25));
9277 }
9278
9279 tg3_restore_clk(tp);
9280
9281 	/* Increase the core clock speed to fix a tx timeout issue for 5762
9282 * with 100Mbps link speed.
9283 */
9284 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9285 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9286 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9287 TG3_CPMU_MAC_ORIDE_ENABLE);
9288 }
9289
9290 /* Reprobe ASF enable state. */
9291 tg3_flag_clear(tp, ENABLE_ASF);
9292 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9293 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9294
9295 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9296 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9297 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9298 u32 nic_cfg;
9299
9300 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9301 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9302 tg3_flag_set(tp, ENABLE_ASF);
9303 tp->last_event_jiffies = jiffies;
9304 if (tg3_flag(tp, 5750_PLUS))
9305 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9306
9307 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9308 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9309 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9310 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9311 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9312 }
9313 }
9314
9315 return 0;
9316 }
9317
9318 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9319 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9320 static void __tg3_set_rx_mode(struct net_device *);
9321
9322 /* tp->lock is held. */
9323 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9324 {
9325 int err;
9326
9327 tg3_stop_fw(tp);
9328
9329 tg3_write_sig_pre_reset(tp, kind);
9330
9331 tg3_abort_hw(tp, silent);
9332 err = tg3_chip_reset(tp);
9333
9334 __tg3_set_mac_addr(tp, false);
9335
9336 tg3_write_sig_legacy(tp, kind);
9337 tg3_write_sig_post_reset(tp, kind);
9338
9339 if (tp->hw_stats) {
9340 /* Save the stats across chip resets... */
9341 tg3_get_nstats(tp, &tp->net_stats_prev);
9342 tg3_get_estats(tp, &tp->estats_prev);
9343
9344 /* And make sure the next sample is new data */
9345 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9346 }
9347
9348 return err;
9349 }
9350
9351 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9352 {
9353 struct tg3 *tp = netdev_priv(dev);
9354 struct sockaddr *addr = p;
9355 int err = 0;
9356 bool skip_mac_1 = false;
9357
9358 if (!is_valid_ether_addr(addr->sa_data))
9359 return -EADDRNOTAVAIL;
9360
9361 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9362
9363 if (!netif_running(dev))
9364 return 0;
9365
9366 if (tg3_flag(tp, ENABLE_ASF)) {
9367 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9368
9369 addr0_high = tr32(MAC_ADDR_0_HIGH);
9370 addr0_low = tr32(MAC_ADDR_0_LOW);
9371 addr1_high = tr32(MAC_ADDR_1_HIGH);
9372 addr1_low = tr32(MAC_ADDR_1_LOW);
9373
9374 /* Skip MAC addr 1 if ASF is using it. */
9375 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9376 !(addr1_high == 0 && addr1_low == 0))
9377 skip_mac_1 = true;
9378 }
9379 spin_lock_bh(&tp->lock);
9380 __tg3_set_mac_addr(tp, skip_mac_1);
9381 __tg3_set_rx_mode(dev);
9382 spin_unlock_bh(&tp->lock);
9383
9384 return err;
9385 }
9386
9387 /* tp->lock is held. */
9388 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9389 dma_addr_t mapping, u32 maxlen_flags,
9390 u32 nic_addr)
9391 {
9392 tg3_write_mem(tp,
9393 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9394 ((u64) mapping >> 32));
9395 tg3_write_mem(tp,
9396 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9397 ((u64) mapping & 0xffffffff));
9398 tg3_write_mem(tp,
9399 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9400 maxlen_flags);
9401
9402 if (!tg3_flag(tp, 5705_PLUS))
9403 tg3_write_mem(tp,
9404 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9405 nic_addr);
9406 }
9407
9408
9409 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9410 {
9411 int i = 0;
9412
9413 if (!tg3_flag(tp, ENABLE_TSS)) {
9414 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9415 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9416 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9417 } else {
9418 tw32(HOSTCC_TXCOL_TICKS, 0);
9419 tw32(HOSTCC_TXMAX_FRAMES, 0);
9420 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9421
9422 for (; i < tp->txq_cnt; i++) {
9423 u32 reg;
9424
9425 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9426 tw32(reg, ec->tx_coalesce_usecs);
9427 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9428 tw32(reg, ec->tx_max_coalesced_frames);
9429 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9430 tw32(reg, ec->tx_max_coalesced_frames_irq);
9431 }
9432 }
9433
9434 for (; i < tp->irq_max - 1; i++) {
9435 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9436 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9437 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9438 }
9439 }
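/* Per-vector register stride: each additional host-coalescing vector
 * owns a block of registers 0x18 bytes apart, hence the "+ i * 0x18"
 * addressing; vectors beyond txq_cnt are zeroed so stale settings
 * cannot trigger coalescing.
 */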
9440
9441 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9442 {
9443 int i = 0;
9444 u32 limit = tp->rxq_cnt;
9445
9446 if (!tg3_flag(tp, ENABLE_RSS)) {
9447 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9448 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9449 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9450 limit--;
9451 } else {
9452 tw32(HOSTCC_RXCOL_TICKS, 0);
9453 tw32(HOSTCC_RXMAX_FRAMES, 0);
9454 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9455 }
9456
9457 for (; i < limit; i++) {
9458 u32 reg;
9459
9460 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9461 tw32(reg, ec->rx_coalesce_usecs);
9462 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9463 tw32(reg, ec->rx_max_coalesced_frames);
9464 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9465 tw32(reg, ec->rx_max_coalesced_frames_irq);
9466 }
9467
9468 for (; i < tp->irq_max - 1; i++) {
9469 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9470 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9471 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9472 }
9473 }
9474
9475 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9476 {
9477 tg3_coal_tx_init(tp, ec);
9478 tg3_coal_rx_init(tp, ec);
9479
9480 if (!tg3_flag(tp, 5705_PLUS)) {
9481 u32 val = ec->stats_block_coalesce_usecs;
9482
9483 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9484 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9485
9486 if (!tp->link_up)
9487 val = 0;
9488
9489 tw32(HOSTCC_STAT_COAL_TICKS, val);
9490 }
9491 }
9492
9493 /* tp->lock is held. */
9494 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9495 {
9496 u32 txrcb, limit;
9497
9498 /* Disable all transmit rings but the first. */
9499 if (!tg3_flag(tp, 5705_PLUS))
9500 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9501 else if (tg3_flag(tp, 5717_PLUS))
9502 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9503 else if (tg3_flag(tp, 57765_CLASS) ||
9504 tg3_asic_rev(tp) == ASIC_REV_5762)
9505 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9506 else
9507 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9508
9509 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9510 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9511 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9512 BDINFO_FLAGS_DISABLED);
9513 }
9514
9515 /* tp->lock is held. */
9516 static void tg3_tx_rcbs_init(struct tg3 *tp)
9517 {
9518 int i = 0;
9519 u32 txrcb = NIC_SRAM_SEND_RCB;
9520
9521 if (tg3_flag(tp, ENABLE_TSS))
9522 i++;
9523
9524 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9525 struct tg3_napi *tnapi = &tp->napi[i];
9526
9527 if (!tnapi->tx_ring)
9528 continue;
9529
9530 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9531 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9532 NIC_SRAM_TX_BUFFER_DESC);
9533 }
9534 }
9535
9536 /* tp->lock is held. */
9537 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9538 {
9539 u32 rxrcb, limit;
9540
9541 /* Disable all receive return rings but the first. */
9542 if (tg3_flag(tp, 5717_PLUS))
9543 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9544 else if (!tg3_flag(tp, 5705_PLUS))
9545 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9546 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9547 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9548 tg3_flag(tp, 57765_CLASS))
9549 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9550 else
9551 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9552
9553 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9554 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9555 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9556 BDINFO_FLAGS_DISABLED);
9557 }
9558
9559 /* tp->lock is held. */
9560 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9561 {
9562 int i = 0;
9563 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9564
9565 if (tg3_flag(tp, ENABLE_RSS))
9566 i++;
9567
9568 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9569 struct tg3_napi *tnapi = &tp->napi[i];
9570
9571 if (!tnapi->rx_rcb)
9572 continue;
9573
9574 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9575 (tp->rx_ret_ring_mask + 1) <<
9576 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9577 }
9578 }
9579
9580 /* tp->lock is held. */
9581 static void tg3_rings_reset(struct tg3 *tp)
9582 {
9583 int i;
9584 u32 stblk;
9585 struct tg3_napi *tnapi = &tp->napi[0];
9586
9587 tg3_tx_rcbs_disable(tp);
9588
9589 tg3_rx_ret_rcbs_disable(tp);
9590
9591 /* Disable interrupts */
9592 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9593 tp->napi[0].chk_msi_cnt = 0;
9594 tp->napi[0].last_rx_cons = 0;
9595 tp->napi[0].last_tx_cons = 0;
9596
9597 /* Zero mailbox registers. */
9598 if (tg3_flag(tp, SUPPORT_MSIX)) {
9599 for (i = 1; i < tp->irq_max; i++) {
9600 tp->napi[i].tx_prod = 0;
9601 tp->napi[i].tx_cons = 0;
9602 if (tg3_flag(tp, ENABLE_TSS))
9603 tw32_mailbox(tp->napi[i].prodmbox, 0);
9604 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9605 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9606 tp->napi[i].chk_msi_cnt = 0;
9607 tp->napi[i].last_rx_cons = 0;
9608 tp->napi[i].last_tx_cons = 0;
9609 }
9610 if (!tg3_flag(tp, ENABLE_TSS))
9611 tw32_mailbox(tp->napi[0].prodmbox, 0);
9612 } else {
9613 tp->napi[0].tx_prod = 0;
9614 tp->napi[0].tx_cons = 0;
9615 tw32_mailbox(tp->napi[0].prodmbox, 0);
9616 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9617 }
9618
9619 /* Make sure the NIC-based send BD rings are disabled. */
9620 if (!tg3_flag(tp, 5705_PLUS)) {
9621 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9622 for (i = 0; i < 16; i++)
9623 tw32_tx_mbox(mbox + i * 8, 0);
9624 }
9625
9626 /* Clear status block in ram. */
9627 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9628
9629 /* Set status block DMA address */
9630 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9631 ((u64) tnapi->status_mapping >> 32));
9632 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9633 ((u64) tnapi->status_mapping & 0xffffffff));
9634
9635 stblk = HOSTCC_STATBLCK_RING1;
9636
9637 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9638 u64 mapping = (u64)tnapi->status_mapping;
9639 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9640 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9641 stblk += 8;
9642
9643 /* Clear status block in ram. */
9644 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9645 }
9646
9647 tg3_tx_rcbs_init(tp);
9648 tg3_rx_ret_rcbs_init(tp);
9649 }
9650
9651 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9652 {
9653 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9654
9655 if (!tg3_flag(tp, 5750_PLUS) ||
9656 tg3_flag(tp, 5780_CLASS) ||
9657 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9658 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9659 tg3_flag(tp, 57765_PLUS))
9660 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9661 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9662 tg3_asic_rev(tp) == ASIC_REV_5787)
9663 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9664 else
9665 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9666
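	/* Replenish threshold for the standard ring: the smaller of what
	 * the on-chip BD cache can absorb (half its depth, capped by
	 * rx_std_max_post) and one eighth of the host ring size.
	 */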
9667 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9668 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9669
9670 val = min(nic_rep_thresh, host_rep_thresh);
9671 tw32(RCVBDI_STD_THRESH, val);
9672
9673 if (tg3_flag(tp, 57765_PLUS))
9674 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9675
9676 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9677 return;
9678
9679 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9680
9681 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9682
9683 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9684 tw32(RCVBDI_JUMBO_THRESH, val);
9685
9686 if (tg3_flag(tp, 57765_PLUS))
9687 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9688 }
9689
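/* Bit-serial CRC-32 (IEEE 802.3, reflected polynomial 0xedb88320).
 * Used below to hash multicast addresses into the MAC_HASH_REG_*
 * filter bitmap.
 */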
9690 static inline u32 calc_crc(unsigned char *buf, int len)
9691 {
9692 u32 reg;
9693 u32 tmp;
9694 int j, k;
9695
9696 reg = 0xffffffff;
9697
9698 for (j = 0; j < len; j++) {
9699 reg ^= buf[j];
9700
9701 for (k = 0; k < 8; k++) {
9702 tmp = reg & 0x01;
9703
9704 reg >>= 1;
9705
9706 if (tmp)
9707 reg ^= 0xedb88320;
9708 }
9709 }
9710
9711 return ~reg;
9712 }
9713
9714 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9715 {
9716 /* accept or reject all multicast frames */
9717 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9718 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9719 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9720 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9721 }
9722
9723 static void __tg3_set_rx_mode(struct net_device *dev)
9724 {
9725 struct tg3 *tp = netdev_priv(dev);
9726 u32 rx_mode;
9727
9728 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9729 RX_MODE_KEEP_VLAN_TAG);
9730
9731 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9732 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9733 * flag clear.
9734 */
9735 if (!tg3_flag(tp, ENABLE_ASF))
9736 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9737 #endif
9738
9739 if (dev->flags & IFF_PROMISC) {
9740 /* Promiscuous mode. */
9741 rx_mode |= RX_MODE_PROMISC;
9742 } else if (dev->flags & IFF_ALLMULTI) {
9743 /* Accept all multicast. */
9744 tg3_set_multi(tp, 1);
9745 } else if (netdev_mc_empty(dev)) {
9746 /* Reject all multicast. */
9747 tg3_set_multi(tp, 0);
9748 } else {
9749 /* Accept one or more multicast(s). */
9750 struct netdev_hw_addr *ha;
9751 u32 mc_filter[4] = { 0, };
9752 u32 regidx;
9753 u32 bit;
9754 u32 crc;
9755
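		/* The low 7 bits of the inverted CRC select one of 128
		 * hash bits: bits 6:5 pick one of the four 32-bit hash
		 * registers, bits 4:0 the bit within it.
		 */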
9756 netdev_for_each_mc_addr(ha, dev) {
9757 crc = calc_crc(ha->addr, ETH_ALEN);
9758 bit = ~crc & 0x7f;
9759 regidx = (bit & 0x60) >> 5;
9760 bit &= 0x1f;
9761 mc_filter[regidx] |= (1 << bit);
9762 }
9763
9764 tw32(MAC_HASH_REG_0, mc_filter[0]);
9765 tw32(MAC_HASH_REG_1, mc_filter[1]);
9766 tw32(MAC_HASH_REG_2, mc_filter[2]);
9767 tw32(MAC_HASH_REG_3, mc_filter[3]);
9768 }
9769
9770 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9771 rx_mode |= RX_MODE_PROMISC;
9772 } else if (!(dev->flags & IFF_PROMISC)) {
9773 		/* Add all entries to the MAC address filter list */
9774 int i = 0;
9775 struct netdev_hw_addr *ha;
9776
9777 netdev_for_each_uc_addr(ha, dev) {
9778 __tg3_set_one_mac_addr(tp, ha->addr,
9779 i + TG3_UCAST_ADDR_IDX(tp));
9780 i++;
9781 }
9782 }
9783
9784 if (rx_mode != tp->rx_mode) {
9785 tp->rx_mode = rx_mode;
9786 tw32_f(MAC_RX_MODE, rx_mode);
9787 udelay(10);
9788 }
9789 }
9790
9791 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9792 {
9793 int i;
9794
9795 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9796 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9797 }
9798
9799 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9800 {
9801 int i;
9802
9803 if (!tg3_flag(tp, SUPPORT_MSIX))
9804 return;
9805
9806 if (tp->rxq_cnt == 1) {
9807 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9808 return;
9809 }
9810
9811 /* Validate table against current IRQ count */
9812 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9813 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9814 break;
9815 }
9816
9817 if (i != TG3_RSS_INDIR_TBL_SIZE)
9818 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9819 }
9820
9821 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9822 {
9823 int i = 0;
9824 u32 reg = MAC_RSS_INDIR_TBL_0;
9825
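	/* Each 32-bit indirection register packs eight 4-bit queue
	 * indices, first entry in the most significant nibble.
	 */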
9826 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9827 u32 val = tp->rss_ind_tbl[i];
9828 i++;
9829 for (; i % 8; i++) {
9830 val <<= 4;
9831 val |= tp->rss_ind_tbl[i];
9832 }
9833 tw32(reg, val);
9834 reg += 4;
9835 }
9836 }
9837
9838 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9839 {
9840 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9841 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9842 else
9843 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9844 }
9845
9846 /* tp->lock is held. */
9847 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9848 {
9849 u32 val, rdmac_mode;
9850 int i, err, limit;
9851 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9852
9853 tg3_disable_ints(tp);
9854
9855 tg3_stop_fw(tp);
9856
9857 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9858
9859 if (tg3_flag(tp, INIT_COMPLETE))
9860 tg3_abort_hw(tp, 1);
9861
9862 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9863 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9864 tg3_phy_pull_config(tp);
9865 tg3_eee_pull_config(tp, NULL);
9866 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9867 }
9868
9869 /* Enable MAC control of LPI */
9870 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9871 tg3_setup_eee(tp);
9872
9873 if (reset_phy)
9874 tg3_phy_reset(tp);
9875
9876 err = tg3_chip_reset(tp);
9877 if (err)
9878 return err;
9879
9880 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9881
9882 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9883 val = tr32(TG3_CPMU_CTRL);
9884 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9885 tw32(TG3_CPMU_CTRL, val);
9886
9887 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9888 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9889 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9890 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9891
9892 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9893 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9894 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9895 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9896
9897 val = tr32(TG3_CPMU_HST_ACC);
9898 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9899 val |= CPMU_HST_ACC_MACCLK_6_25;
9900 tw32(TG3_CPMU_HST_ACC, val);
9901 }
9902
9903 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9904 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9905 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9906 PCIE_PWR_MGMT_L1_THRESH_4MS;
9907 tw32(PCIE_PWR_MGMT_THRESH, val);
9908
9909 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9910 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9911
9912 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9913
9914 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9915 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9916 }
9917
9918 if (tg3_flag(tp, L1PLLPD_EN)) {
9919 u32 grc_mode = tr32(GRC_MODE);
9920
9921 /* Access the lower 1K of PL PCIE block registers. */
9922 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9923 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9924
9925 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9926 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9927 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9928
9929 tw32(GRC_MODE, grc_mode);
9930 }
9931
9932 if (tg3_flag(tp, 57765_CLASS)) {
9933 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9934 u32 grc_mode = tr32(GRC_MODE);
9935
9936 /* Access the lower 1K of PL PCIE block registers. */
9937 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9938 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9939
9940 val = tr32(TG3_PCIE_TLDLPL_PORT +
9941 TG3_PCIE_PL_LO_PHYCTL5);
9942 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9943 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9944
9945 tw32(GRC_MODE, grc_mode);
9946 }
9947
9948 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9949 u32 grc_mode;
9950
9951 /* Fix transmit hangs */
9952 val = tr32(TG3_CPMU_PADRNG_CTL);
9953 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9954 tw32(TG3_CPMU_PADRNG_CTL, val);
9955
9956 grc_mode = tr32(GRC_MODE);
9957
9958 /* Access the lower 1K of DL PCIE block registers. */
9959 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9960 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9961
9962 val = tr32(TG3_PCIE_TLDLPL_PORT +
9963 TG3_PCIE_DL_LO_FTSMAX);
9964 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9965 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9966 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9967
9968 tw32(GRC_MODE, grc_mode);
9969 }
9970
9971 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9972 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9973 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9974 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9975 }
9976
9977 /* This works around an issue with Athlon chipsets on
9978 * B3 tigon3 silicon. This bit has no effect on any
9979 * other revision. But do not set this on PCI Express
9980 * chips and don't even touch the clocks if the CPMU is present.
9981 */
9982 if (!tg3_flag(tp, CPMU_PRESENT)) {
9983 if (!tg3_flag(tp, PCI_EXPRESS))
9984 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9985 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9986 }
9987
9988 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9989 tg3_flag(tp, PCIX_MODE)) {
9990 val = tr32(TG3PCI_PCISTATE);
9991 val |= PCISTATE_RETRY_SAME_DMA;
9992 tw32(TG3PCI_PCISTATE, val);
9993 }
9994
9995 if (tg3_flag(tp, ENABLE_APE)) {
9996 /* Allow reads and writes to the
9997 * APE register and memory space.
9998 */
9999 val = tr32(TG3PCI_PCISTATE);
10000 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10001 PCISTATE_ALLOW_APE_SHMEM_WR |
10002 PCISTATE_ALLOW_APE_PSPACE_WR;
10003 tw32(TG3PCI_PCISTATE, val);
10004 }
10005
10006 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10007 /* Enable some hw fixes. */
10008 val = tr32(TG3PCI_MSI_DATA);
10009 val |= (1 << 26) | (1 << 28) | (1 << 29);
10010 tw32(TG3PCI_MSI_DATA, val);
10011 }
10012
10013 /* Descriptor ring init may make accesses to the
10014 	 * NIC SRAM area to set up the TX descriptors, so we
10015 * can only do this after the hardware has been
10016 * successfully reset.
10017 */
10018 err = tg3_init_rings(tp);
10019 if (err)
10020 return err;
10021
10022 if (tg3_flag(tp, 57765_PLUS)) {
10023 val = tr32(TG3PCI_DMA_RW_CTRL) &
10024 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10025 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10026 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10027 if (!tg3_flag(tp, 57765_CLASS) &&
10028 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10029 tg3_asic_rev(tp) != ASIC_REV_5762)
10030 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10031 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10032 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10033 tg3_asic_rev(tp) != ASIC_REV_5761) {
10034 		/* This value is determined during the probe-time DMA
10035 		 * engine test, tg3_test_dma().
10036 		 */
10037 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10038 }
10039
10040 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10041 GRC_MODE_4X_NIC_SEND_RINGS |
10042 GRC_MODE_NO_TX_PHDR_CSUM |
10043 GRC_MODE_NO_RX_PHDR_CSUM);
10044 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10045
10046 /* Pseudo-header checksum is done by hardware logic and not
10047 	 * the offload processors, so make the chip do the pseudo-
10048 * header checksums on receive. For transmit it is more
10049 * convenient to do the pseudo-header checksum in software
10050 * as Linux does that on transmit for us in all cases.
10051 */
10052 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10053
10054 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10055 if (tp->rxptpctl)
10056 tw32(TG3_RX_PTP_CTL,
10057 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10058
10059 if (tg3_flag(tp, PTP_CAPABLE))
10060 val |= GRC_MODE_TIME_SYNC_ENABLE;
10061
10062 tw32(GRC_MODE, tp->grc_mode | val);
10063
10064 	/* On one particular AMD platform, MRRS is restricted to 4000
10065 	 * because of a south bridge limitation. As a workaround, the
10066 	 * driver sets MRRS to 2048 instead of the default 4096.
10067 	 */
10068 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10069 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10070 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10071 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10072 }
10073
10074 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
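	/* 66 MHz / (65 + 1) = 1 MHz, i.e. a 1 us timer tick, assuming the
	 * hardware divides by prescaler + 1.
	 */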
10075 val = tr32(GRC_MISC_CFG);
10076 val &= ~0xff;
10077 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10078 tw32(GRC_MISC_CFG, val);
10079
10080 /* Initialize MBUF/DESC pool. */
10081 if (tg3_flag(tp, 5750_PLUS)) {
10082 /* Do nothing. */
10083 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10084 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10085 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10086 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10087 else
10088 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10089 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10090 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10091 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10092 int fw_len;
10093
10094 fw_len = tp->fw_len;
10095 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10096 tw32(BUFMGR_MB_POOL_ADDR,
10097 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10098 tw32(BUFMGR_MB_POOL_SIZE,
10099 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10100 }
10101
10102 if (tp->dev->mtu <= ETH_DATA_LEN) {
10103 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10104 tp->bufmgr_config.mbuf_read_dma_low_water);
10105 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10106 tp->bufmgr_config.mbuf_mac_rx_low_water);
10107 tw32(BUFMGR_MB_HIGH_WATER,
10108 tp->bufmgr_config.mbuf_high_water);
10109 } else {
10110 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10111 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10112 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10113 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10114 tw32(BUFMGR_MB_HIGH_WATER,
10115 tp->bufmgr_config.mbuf_high_water_jumbo);
10116 }
10117 tw32(BUFMGR_DMA_LOW_WATER,
10118 tp->bufmgr_config.dma_low_water);
10119 tw32(BUFMGR_DMA_HIGH_WATER,
10120 tp->bufmgr_config.dma_high_water);
10121
10122 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10123 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10124 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10125 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10126 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10127 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10128 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10129 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10130 tw32(BUFMGR_MODE, val);
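	/* Poll up to 2000 x 10 us (20 ms) for the buffer manager to
	 * report itself enabled.
	 */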
10131 for (i = 0; i < 2000; i++) {
10132 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10133 break;
10134 udelay(10);
10135 }
10136 if (i >= 2000) {
10137 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10138 return -ENODEV;
10139 }
10140
10141 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10142 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10143
10144 tg3_setup_rxbd_thresholds(tp);
10145
10146 /* Initialize TG3_BDINFO's at:
10147 * RCVDBDI_STD_BD: standard eth size rx ring
10148 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10149 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10150 *
10151 * like so:
10152 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10153 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10154 * ring attribute flags
10155 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10156 *
10157 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10158 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10159 *
10160 * The size of each ring is fixed in the firmware, but the location is
10161 * configurable.
10162 */
10163 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10164 ((u64) tpr->rx_std_mapping >> 32));
10165 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10166 ((u64) tpr->rx_std_mapping & 0xffffffff));
10167 if (!tg3_flag(tp, 5717_PLUS))
10168 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10169 NIC_SRAM_RX_BUFFER_DESC);
10170
10171 /* Disable the mini ring */
10172 if (!tg3_flag(tp, 5705_PLUS))
10173 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10174 BDINFO_FLAGS_DISABLED);
10175
10176 /* Program the jumbo buffer descriptor ring control
10177 * blocks on those devices that have them.
10178 */
10179 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10180 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10181
10182 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10183 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10184 ((u64) tpr->rx_jmb_mapping >> 32));
10185 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10186 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10187 val = TG3_RX_JMB_RING_SIZE(tp) <<
10188 BDINFO_FLAGS_MAXLEN_SHIFT;
10189 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10190 val | BDINFO_FLAGS_USE_EXT_RECV);
10191 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10192 tg3_flag(tp, 57765_CLASS) ||
10193 tg3_asic_rev(tp) == ASIC_REV_5762)
10194 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10195 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10196 } else {
10197 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10198 BDINFO_FLAGS_DISABLED);
10199 }
10200
10201 if (tg3_flag(tp, 57765_PLUS)) {
10202 val = TG3_RX_STD_RING_SIZE(tp);
10203 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10204 val |= (TG3_RX_STD_DMA_SZ << 2);
10205 } else
10206 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10207 } else
10208 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10209
10210 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10211
10212 tpr->rx_std_prod_idx = tp->rx_pending;
10213 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10214
10215 tpr->rx_jmb_prod_idx =
10216 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10217 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10218
10219 tg3_rings_reset(tp);
10220
10221 /* Initialize MAC address and backoff seed. */
10222 __tg3_set_mac_addr(tp, false);
10223
10224 /* MTU + ethernet header + FCS + optional VLAN tag */
10225 tw32(MAC_RX_MTU_SIZE,
10226 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10227
10228 /* The slot time is changed by tg3_setup_phy if we
10229 * run at gigabit with half duplex.
10230 */
10231 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10232 (6 << TX_LENGTHS_IPG_SHIFT) |
10233 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10234
10235 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10236 tg3_asic_rev(tp) == ASIC_REV_5762)
10237 val |= tr32(MAC_TX_LENGTHS) &
10238 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10239 TX_LENGTHS_CNT_DWN_VAL_MSK);
10240
10241 tw32(MAC_TX_LENGTHS, val);
10242
10243 /* Receive rules. */
10244 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10245 tw32(RCVLPC_CONFIG, 0x0181);
10246
10247 	/* Calculate the RDMAC_MODE setting early; we need it to determine
10248 	 * the RCVLPC_STATS_ENABLE mask.
10249 */
10250 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10251 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10252 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10253 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10254 RDMAC_MODE_LNGREAD_ENAB);
10255
10256 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10257 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10258
10259 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10260 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10261 tg3_asic_rev(tp) == ASIC_REV_57780)
10262 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10263 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10264 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10265
10266 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10267 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10268 if (tg3_flag(tp, TSO_CAPABLE) &&
10269 tg3_asic_rev(tp) == ASIC_REV_5705) {
10270 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10271 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10272 !tg3_flag(tp, IS_5788)) {
10273 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10274 }
10275 }
10276
10277 if (tg3_flag(tp, PCI_EXPRESS))
10278 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10279
10280 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10281 tp->dma_limit = 0;
10282 if (tp->dev->mtu <= ETH_DATA_LEN) {
10283 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10284 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10285 }
10286 }
10287
10288 if (tg3_flag(tp, HW_TSO_1) ||
10289 tg3_flag(tp, HW_TSO_2) ||
10290 tg3_flag(tp, HW_TSO_3))
10291 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10292
10293 if (tg3_flag(tp, 57765_PLUS) ||
10294 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10295 tg3_asic_rev(tp) == ASIC_REV_57780)
10296 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10297
10298 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10299 tg3_asic_rev(tp) == ASIC_REV_5762)
10300 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10301
10302 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10303 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10304 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10305 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10306 tg3_flag(tp, 57765_PLUS)) {
10307 u32 tgtreg;
10308
10309 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10310 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10311 else
10312 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10313
10314 val = tr32(tgtreg);
10315 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10316 tg3_asic_rev(tp) == ASIC_REV_5762) {
10317 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10318 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10319 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10320 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10321 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10322 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10323 }
10324 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10325 }
10326
10327 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10328 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10329 tg3_asic_rev(tp) == ASIC_REV_5762) {
10330 u32 tgtreg;
10331
10332 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10333 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10334 else
10335 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10336
10337 val = tr32(tgtreg);
10338 tw32(tgtreg, val |
10339 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10340 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10341 }
10342
10343 /* Receive/send statistics. */
10344 if (tg3_flag(tp, 5750_PLUS)) {
10345 val = tr32(RCVLPC_STATS_ENABLE);
10346 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10347 tw32(RCVLPC_STATS_ENABLE, val);
10348 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10349 tg3_flag(tp, TSO_CAPABLE)) {
10350 val = tr32(RCVLPC_STATS_ENABLE);
10351 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10352 tw32(RCVLPC_STATS_ENABLE, val);
10353 } else {
10354 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10355 }
10356 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10357 tw32(SNDDATAI_STATSENAB, 0xffffff);
10358 tw32(SNDDATAI_STATSCTRL,
10359 (SNDDATAI_SCTRL_ENABLE |
10360 SNDDATAI_SCTRL_FASTUPD));
10361
10362 /* Setup host coalescing engine. */
10363 tw32(HOSTCC_MODE, 0);
10364 for (i = 0; i < 2000; i++) {
10365 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10366 break;
10367 udelay(10);
10368 }
10369
10370 __tg3_set_coalesce(tp, &tp->coal);
10371
10372 if (!tg3_flag(tp, 5705_PLUS)) {
10373 /* Status/statistics block address. See tg3_timer,
10374 * the tg3_periodic_fetch_stats call there, and
10375 * tg3_get_stats to see how this works for 5705/5750 chips.
10376 */
10377 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10378 ((u64) tp->stats_mapping >> 32));
10379 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10380 ((u64) tp->stats_mapping & 0xffffffff));
10381 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10382
10383 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10384
10385 /* Clear statistics and status block memory areas */
10386 for (i = NIC_SRAM_STATS_BLK;
10387 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10388 i += sizeof(u32)) {
10389 tg3_write_mem(tp, i, 0);
10390 udelay(40);
10391 }
10392 }
10393
10394 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10395
10396 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10397 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10398 if (!tg3_flag(tp, 5705_PLUS))
10399 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10400
10401 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10402 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10403 /* reset to prevent losing 1st rx packet intermittently */
10404 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10405 udelay(10);
10406 }
10407
10408 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10409 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10410 MAC_MODE_FHDE_ENABLE;
10411 if (tg3_flag(tp, ENABLE_APE))
10412 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10413 if (!tg3_flag(tp, 5705_PLUS) &&
10414 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10415 tg3_asic_rev(tp) != ASIC_REV_5700)
10416 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10418 udelay(40);
10419
10420 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10421 * If TG3_FLAG_IS_NIC is zero, we should read the
10422 * register to preserve the GPIO settings for LOMs. The GPIOs,
10423 * whether used as inputs or outputs, are set by boot code after
10424 * reset.
10425 */
10426 if (!tg3_flag(tp, IS_NIC)) {
10427 u32 gpio_mask;
10428
10429 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10430 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10431 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10432
10433 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10434 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10435 GRC_LCLCTRL_GPIO_OUTPUT3;
10436
10437 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10438 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10439
10440 tp->grc_local_ctrl &= ~gpio_mask;
10441 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10442
10443 /* GPIO1 must be driven high for eeprom write protect */
10444 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10445 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10446 GRC_LCLCTRL_GPIO_OUTPUT1);
10447 }
10448 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10449 udelay(100);
10450
10451 if (tg3_flag(tp, USING_MSIX)) {
10452 val = tr32(MSGINT_MODE);
10453 val |= MSGINT_MODE_ENABLE;
10454 if (tp->irq_cnt > 1)
10455 val |= MSGINT_MODE_MULTIVEC_EN;
10456 if (!tg3_flag(tp, 1SHOT_MSI))
10457 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10458 tw32(MSGINT_MODE, val);
10459 }
10460
10461 if (!tg3_flag(tp, 5705_PLUS)) {
10462 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10463 udelay(40);
10464 }
10465
10466 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10467 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10468 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10469 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10470 WDMAC_MODE_LNGREAD_ENAB);
10471
10472 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10473 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10474 if (tg3_flag(tp, TSO_CAPABLE) &&
10475 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10476 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10477 /* nothing */
10478 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10479 !tg3_flag(tp, IS_5788)) {
10480 val |= WDMAC_MODE_RX_ACCEL;
10481 }
10482 }
10483
10484 /* Enable host coalescing bug fix */
10485 if (tg3_flag(tp, 5755_PLUS))
10486 val |= WDMAC_MODE_STATUS_TAG_FIX;
10487
10488 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10489 val |= WDMAC_MODE_BURST_ALL_DATA;
10490
10491 tw32_f(WDMAC_MODE, val);
10492 udelay(40);
10493
10494 if (tg3_flag(tp, PCIX_MODE)) {
10495 u16 pcix_cmd;
10496
10497 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10498 &pcix_cmd);
10499 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10500 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10501 pcix_cmd |= PCI_X_CMD_READ_2K;
10502 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10503 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10504 pcix_cmd |= PCI_X_CMD_READ_2K;
10505 }
10506 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10507 pcix_cmd);
10508 }
10509
10510 tw32_f(RDMAC_MODE, rdmac_mode);
10511 udelay(40);
10512
10513 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10514 tg3_asic_rev(tp) == ASIC_REV_5720) {
10515 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10516 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10517 break;
10518 }
10519 if (i < TG3_NUM_RDMA_CHANNELS) {
10520 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10521 val |= tg3_lso_rd_dma_workaround_bit(tp);
10522 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10523 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10524 }
10525 }
10526
10527 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10528 if (!tg3_flag(tp, 5705_PLUS))
10529 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10530
10531 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10532 tw32(SNDDATAC_MODE,
10533 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10534 else
10535 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10536
10537 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10538 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10539 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10540 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10541 val |= RCVDBDI_MODE_LRG_RING_SZ;
10542 tw32(RCVDBDI_MODE, val);
10543 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10544 if (tg3_flag(tp, HW_TSO_1) ||
10545 tg3_flag(tp, HW_TSO_2) ||
10546 tg3_flag(tp, HW_TSO_3))
10547 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10548 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10549 if (tg3_flag(tp, ENABLE_TSS))
10550 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10551 tw32(SNDBDI_MODE, val);
10552 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10553
10554 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10555 err = tg3_load_5701_a0_firmware_fix(tp);
10556 if (err)
10557 return err;
10558 }
10559
10560 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10561 /* Ignore any errors for the firmware download. If download
10562 		 * fails, the device will operate with EEE disabled.
10563 */
10564 tg3_load_57766_firmware(tp);
10565 }
10566
10567 if (tg3_flag(tp, TSO_CAPABLE)) {
10568 err = tg3_load_tso_firmware(tp);
10569 if (err)
10570 return err;
10571 }
10572
10573 tp->tx_mode = TX_MODE_ENABLE;
10574
10575 if (tg3_flag(tp, 5755_PLUS) ||
10576 tg3_asic_rev(tp) == ASIC_REV_5906)
10577 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10578
10579 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10580 tg3_asic_rev(tp) == ASIC_REV_5762) {
10581 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10582 tp->tx_mode &= ~val;
10583 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10584 }
10585
10586 tw32_f(MAC_TX_MODE, tp->tx_mode);
10587 udelay(100);
10588
10589 if (tg3_flag(tp, ENABLE_RSS)) {
10590 u32 rss_key[10];
10591
10592 tg3_rss_write_indir_tbl(tp);
10593
10594 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10595
10596 		for (i = 0; i < 10; i++)
10597 			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10598 }
10599
10600 tp->rx_mode = RX_MODE_ENABLE;
10601 if (tg3_flag(tp, 5755_PLUS))
10602 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10603
10604 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10605 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10606
10607 if (tg3_flag(tp, ENABLE_RSS))
10608 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10609 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10610 RX_MODE_RSS_IPV6_HASH_EN |
10611 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10612 RX_MODE_RSS_IPV4_HASH_EN |
10613 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10614
10615 tw32_f(MAC_RX_MODE, tp->rx_mode);
10616 udelay(10);
10617
10618 tw32(MAC_LED_CTRL, tp->led_ctrl);
10619
10620 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10621 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10622 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10623 udelay(10);
10624 }
10625 tw32_f(MAC_RX_MODE, tp->rx_mode);
10626 udelay(10);
10627
10628 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10629 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10630 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10631 /* Set drive transmission level to 1.2V */
10632 /* only if the signal pre-emphasis bit is not set */
10633 val = tr32(MAC_SERDES_CFG);
10634 val &= 0xfffff000;
10635 val |= 0x880;
10636 tw32(MAC_SERDES_CFG, val);
10637 }
10638 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10639 tw32(MAC_SERDES_CFG, 0x616000);
10640 }
10641
10642 /* Prevent chip from dropping frames when flow control
10643 * is enabled.
10644 */
10645 if (tg3_flag(tp, 57765_CLASS))
10646 val = 1;
10647 else
10648 val = 2;
10649 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10650
10651 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10652 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10653 /* Use hardware link auto-negotiation */
10654 tg3_flag_set(tp, HW_AUTONEG);
10655 }
10656
10657 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10658 tg3_asic_rev(tp) == ASIC_REV_5714) {
10659 u32 tmp;
10660
10661 tmp = tr32(SERDES_RX_CTRL);
10662 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10663 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10664 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10665 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10666 }
10667
10668 if (!tg3_flag(tp, USE_PHYLIB)) {
10669 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10670 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10671
10672 err = tg3_setup_phy(tp, false);
10673 if (err)
10674 return err;
10675
10676 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10677 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10678 u32 tmp;
10679
10680 /* Clear CRC stats. */
10681 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10682 tg3_writephy(tp, MII_TG3_TEST1,
10683 tmp | MII_TG3_TEST1_CRC_EN);
10684 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10685 }
10686 }
10687 }
10688
10689 __tg3_set_rx_mode(tp->dev);
10690
10691 /* Initialize receive rules. */
10692 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10693 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10694 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10695 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10696
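	/* 5705+ (non-5780) parts implement 8 receive-rule slots, older
	 * parts 16; the last four are left alone when ASF firmware is
	 * active.
	 */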
10697 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10698 limit = 8;
10699 else
10700 limit = 16;
10701 if (tg3_flag(tp, ENABLE_ASF))
10702 limit -= 4;
10703 switch (limit) {
10704 case 16:
10705 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10706 case 15:
10707 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10708 case 14:
10709 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10710 case 13:
10711 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10712 case 12:
10713 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10714 case 11:
10715 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10716 case 10:
10717 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10718 case 9:
10719 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10720 case 8:
10721 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10722 case 7:
10723 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10724 case 6:
10725 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10726 case 5:
10727 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10728 case 4:
10729 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10730 case 3:
10731 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10732 case 2:
10733 case 1:
10734
10735 default:
10736 break;
10737 }
10738
10739 if (tg3_flag(tp, ENABLE_APE))
10740 /* Write our heartbeat update interval to APE. */
10741 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10742 APE_HOST_HEARTBEAT_INT_DISABLE);
10743
10744 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10745
10746 return 0;
10747 }
10748
10749 /* Called at device open time to get the chip ready for
10750 * packet processing. Invoked with tp->lock held.
10751 */
10752 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10753 {
10754 /* Chip may have been just powered on. If so, the boot code may still
10755 * be running initialization. Wait for it to finish to avoid races in
10756 * accessing the hardware.
10757 */
10758 tg3_enable_register_access(tp);
10759 tg3_poll_fw(tp);
10760
10761 tg3_switch_clocks(tp);
10762
10763 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10764
10765 return tg3_reset_hw(tp, reset_phy);
10766 }
10767
10768 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10769 {
10770 int i;
10771
10772 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10773 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10774
10775 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10776 off += len;
10777
10778 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10779 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10780 memset(ocir, 0, TG3_OCIR_LEN);
10781 }
10782 }
10783
10784 /* sysfs attributes for hwmon */
10785 static ssize_t tg3_show_temp(struct device *dev,
10786 struct device_attribute *devattr, char *buf)
10787 {
10788 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10789 struct tg3 *tp = dev_get_drvdata(dev);
10790 u32 temperature;
10791
10792 spin_lock_bh(&tp->lock);
10793 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10794 sizeof(temperature));
10795 spin_unlock_bh(&tp->lock);
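	/* The hwmon sysfs ABI reports temperatures in millidegrees
	 * Celsius; the APE value is scaled up accordingly.
	 */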
10796 return sprintf(buf, "%u\n", temperature * 1000);
10797 }
10798
10799
10800 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10801 TG3_TEMP_SENSOR_OFFSET);
10802 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10803 TG3_TEMP_CAUTION_OFFSET);
10804 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10805 TG3_TEMP_MAX_OFFSET);
10806
10807 static struct attribute *tg3_attrs[] = {
10808 &sensor_dev_attr_temp1_input.dev_attr.attr,
10809 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10810 &sensor_dev_attr_temp1_max.dev_attr.attr,
10811 NULL
10812 };
10813 ATTRIBUTE_GROUPS(tg3);
10814
10815 static void tg3_hwmon_close(struct tg3 *tp)
10816 {
10817 if (tp->hwmon_dev) {
10818 hwmon_device_unregister(tp->hwmon_dev);
10819 tp->hwmon_dev = NULL;
10820 }
10821 }
10822
10823 static void tg3_hwmon_open(struct tg3 *tp)
10824 {
10825 int i;
10826 u32 size = 0;
10827 struct pci_dev *pdev = tp->pdev;
10828 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10829
10830 tg3_sd_scan_scratchpad(tp, ocirs);
10831
10832 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10833 if (!ocirs[i].src_data_length)
10834 continue;
10835
10836 size += ocirs[i].src_hdr_length;
10837 size += ocirs[i].src_data_length;
10838 }
10839
10840 if (!size)
10841 return;
10842
10843 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10844 tp, tg3_groups);
10845 if (IS_ERR(tp->hwmon_dev)) {
10846 tp->hwmon_dev = NULL;
10847 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10848 }
10849 }
10850
10851
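/* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
 * software counter; an unsigned wrap of ->low after the add carries
 * into ->high.
 */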
10852 #define TG3_STAT_ADD32(PSTAT, REG) \
10853 do { u32 __val = tr32(REG); \
10854 (PSTAT)->low += __val; \
10855 if ((PSTAT)->low < __val) \
10856 (PSTAT)->high += 1; \
10857 } while (0)
10858
10859 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10860 {
10861 struct tg3_hw_stats *sp = tp->hw_stats;
10862
10863 if (!tp->link_up)
10864 return;
10865
10866 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10867 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10868 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10869 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10870 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10871 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10872 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10873 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10874 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10875 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10876 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10877 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10878 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
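	/* The 5719/5720 read-DMA length workaround is only needed for
	 * the first frames after reset; once more packets than there are
	 * RDMA channels have been sent, undo it to restore TX throughput.
	 */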
10879 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10880 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10881 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10882 u32 val;
10883
10884 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10885 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10886 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10887 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10888 }
10889
10890 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10891 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10892 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10893 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10894 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10895 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10896 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10897 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10898 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10899 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10900 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10901 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10902 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10903 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10904
10905 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10906 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10907 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10908 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10909 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10910 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10911 } else {
10912 u32 val = tr32(HOSTCC_FLOW_ATTN);
10913 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10914 if (val) {
10915 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10916 sp->rx_discards.low += val;
10917 if (sp->rx_discards.low < val)
10918 sp->rx_discards.high += 1;
10919 }
10920 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10921 }
10922 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10923 }
10924
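/* Workaround for lost MSIs: if a vector still has work pending but its
 * consumer indices have not advanced since the previous timer tick,
 * assume the interrupt was missed and invoke the handler by hand.
 */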
10925 static void tg3_chk_missed_msi(struct tg3 *tp)
10926 {
10927 u32 i;
10928
10929 for (i = 0; i < tp->irq_cnt; i++) {
10930 struct tg3_napi *tnapi = &tp->napi[i];
10931
10932 if (tg3_has_work(tnapi)) {
10933 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10934 tnapi->last_tx_cons == tnapi->tx_cons) {
10935 if (tnapi->chk_msi_cnt < 1) {
10936 tnapi->chk_msi_cnt++;
10937 return;
10938 }
10939 tg3_msi(0, tnapi);
10940 }
10941 }
10942 tnapi->chk_msi_cnt = 0;
10943 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10944 tnapi->last_tx_cons = tnapi->tx_cons;
10945 }
10946 }
10947
10948 static void tg3_timer(unsigned long __opaque)
10949 {
10950 struct tg3 *tp = (struct tg3 *) __opaque;
10951
10952 spin_lock(&tp->lock);
10953
10954 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10955 spin_unlock(&tp->lock);
10956 goto restart_timer;
10957 }
10958
10959 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10960 tg3_flag(tp, 57765_CLASS))
10961 tg3_chk_missed_msi(tp);
10962
10963 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10964 /* BCM4785: Flush posted writes from GbE to host memory. */
10965 tr32(HOSTCC_MODE);
10966 }
10967
10968 if (!tg3_flag(tp, TAGGED_STATUS)) {
10969 		/* All of this garbage is needed because, when using
10970 		 * non-tagged IRQ status, the mailbox/status_block protocol
10971 		 * the chip uses with the CPU is race prone.
10972 */
10973 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10974 tw32(GRC_LOCAL_CTRL,
10975 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10976 } else {
10977 tw32(HOSTCC_MODE, tp->coalesce_mode |
10978 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10979 }
10980
10981 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10982 spin_unlock(&tp->lock);
10983 tg3_reset_task_schedule(tp);
10984 goto restart_timer;
10985 }
10986 }
10987
10988 /* This part only runs once per second. */
10989 if (!--tp->timer_counter) {
10990 if (tg3_flag(tp, 5705_PLUS))
10991 tg3_periodic_fetch_stats(tp);
10992
10993 if (tp->setlpicnt && !--tp->setlpicnt)
10994 tg3_phy_eee_enable(tp);
10995
10996 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10997 u32 mac_stat;
10998 int phy_event;
10999
11000 mac_stat = tr32(MAC_STATUS);
11001
11002 phy_event = 0;
11003 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11004 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11005 phy_event = 1;
11006 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11007 phy_event = 1;
11008
11009 if (phy_event)
11010 tg3_setup_phy(tp, false);
11011 } else if (tg3_flag(tp, POLL_SERDES)) {
11012 u32 mac_stat = tr32(MAC_STATUS);
11013 int need_setup = 0;
11014
11015 if (tp->link_up &&
11016 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11017 need_setup = 1;
11018 }
11019 if (!tp->link_up &&
11020 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11021 MAC_STATUS_SIGNAL_DET))) {
11022 need_setup = 1;
11023 }
11024 if (need_setup) {
11025 if (!tp->serdes_counter) {
11026 tw32_f(MAC_MODE,
11027 (tp->mac_mode &
11028 ~MAC_MODE_PORT_MODE_MASK));
11029 udelay(40);
11030 tw32_f(MAC_MODE, tp->mac_mode);
11031 udelay(40);
11032 }
11033 tg3_setup_phy(tp, false);
11034 }
11035 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11036 tg3_flag(tp, 5780_CLASS)) {
11037 tg3_serdes_parallel_detect(tp);
11038 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11039 u32 cpmu = tr32(TG3_CPMU_STATUS);
11040 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11041 TG3_CPMU_STATUS_LINK_MASK);
11042
11043 if (link_up != tp->link_up)
11044 tg3_setup_phy(tp, false);
11045 }
11046
11047 tp->timer_counter = tp->timer_multiplier;
11048 }
11049
11050 /* Heartbeat is only sent once every 2 seconds.
11051 *
11052 * The heartbeat is to tell the ASF firmware that the host
11053 * driver is still alive. In the event that the OS crashes,
11054 * ASF needs to reset the hardware to free up the FIFO space
11055 * that may be filled with rx packets destined for the host.
11056 * If the FIFO is full, ASF will no longer function properly.
11057 *
11058 	 * Unintended resets have been reported on real-time kernels
11059 	 * where the timer doesn't run on time. Netpoll will also have
11060 	 * the same problem.
11061 *
11062 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11063 * to check the ring condition when the heartbeat is expiring
11064 * before doing the reset. This will prevent most unintended
11065 * resets.
11066 */
11067 if (!--tp->asf_counter) {
11068 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11069 tg3_wait_for_event_ack(tp);
11070
11071 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11072 FWCMD_NICDRV_ALIVE3);
11073 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11074 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11075 TG3_FW_UPDATE_TIMEOUT_SEC);
11076
11077 tg3_generate_fw_event(tp);
11078 }
11079 tp->asf_counter = tp->asf_multiplier;
11080 }
11081
11082 spin_unlock(&tp->lock);
11083
11084 restart_timer:
11085 tp->timer.expires = jiffies + tp->timer_offset;
11086 add_timer(&tp->timer);
11087 }
11088
11089 static void tg3_timer_init(struct tg3 *tp)
11090 {
11091 if (tg3_flag(tp, TAGGED_STATUS) &&
11092 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11093 !tg3_flag(tp, 57765_CLASS))
11094 tp->timer_offset = HZ;
11095 else
11096 tp->timer_offset = HZ / 10;
11097
11098 BUG_ON(tp->timer_offset > HZ);
11099
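	/* timer_multiplier converts timer ticks back to a once-per-second
	 * cadence for the slow work in tg3_timer(); asf_multiplier does
	 * the same for the ASF heartbeat period.
	 */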
11100 tp->timer_multiplier = (HZ / tp->timer_offset);
11101 tp->asf_multiplier = (HZ / tp->timer_offset) *
11102 TG3_FW_UPDATE_FREQ_SEC;
11103
11104 init_timer(&tp->timer);
11105 tp->timer.data = (unsigned long) tp;
11106 tp->timer.function = tg3_timer;
11107 }
11108
11109 static void tg3_timer_start(struct tg3 *tp)
11110 {
11111 tp->asf_counter = tp->asf_multiplier;
11112 tp->timer_counter = tp->timer_multiplier;
11113
11114 tp->timer.expires = jiffies + tp->timer_offset;
11115 add_timer(&tp->timer);
11116 }
11117
11118 static void tg3_timer_stop(struct tg3 *tp)
11119 {
11120 del_timer_sync(&tp->timer);
11121 }
11122
11123 /* Restart hardware after configuration changes, self-test, etc.
11124 * Invoked with tp->lock held.
11125 */
11126 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11127 __releases(tp->lock)
11128 __acquires(tp->lock)
11129 {
11130 int err;
11131
11132 err = tg3_init_hw(tp, reset_phy);
11133 if (err) {
11134 netdev_err(tp->dev,
11135 "Failed to re-initialize device, aborting\n");
11136 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11137 tg3_full_unlock(tp);
11138 tg3_timer_stop(tp);
11139 tp->irq_sync = 0;
11140 tg3_napi_enable(tp);
11141 dev_close(tp->dev);
11142 tg3_full_lock(tp, 0);
11143 }
11144 return err;
11145 }
11146
11147 static void tg3_reset_task(struct work_struct *work)
11148 {
11149 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11150 int err;
11151
11152 rtnl_lock();
11153 tg3_full_lock(tp, 0);
11154
11155 if (!netif_running(tp->dev)) {
11156 tg3_flag_clear(tp, RESET_TASK_PENDING);
11157 tg3_full_unlock(tp);
11158 rtnl_unlock();
11159 return;
11160 }
11161
11162 tg3_full_unlock(tp);
11163
11164 tg3_phy_stop(tp);
11165
11166 tg3_netif_stop(tp);
11167
11168 tg3_full_lock(tp, 1);
11169
11170 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11171 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11172 tp->write32_rx_mbox = tg3_write_flush_reg32;
11173 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11174 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11175 }
11176
11177 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11178 err = tg3_init_hw(tp, true);
11179 if (err) {
11180 tg3_full_unlock(tp);
11181 tp->irq_sync = 0;
11182 tg3_napi_enable(tp);
11183 /* Clear this flag so that tg3_reset_task_cancel() will not
11184 * call cancel_work_sync() and wait forever.
11185 */
11186 tg3_flag_clear(tp, RESET_TASK_PENDING);
11187 dev_close(tp->dev);
11188 goto out;
11189 }
11190
11191 tg3_netif_start(tp);
11192
11193 tg3_full_unlock(tp);
11194
11195 if (!err)
11196 tg3_phy_start(tp);
11197
11198 tg3_flag_clear(tp, RESET_TASK_PENDING);
11199 out:
11200 rtnl_unlock();
11201 }
11202
11203 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11204 {
11205 irq_handler_t fn;
11206 unsigned long flags;
11207 char *name;
11208 struct tg3_napi *tnapi = &tp->napi[irq_num];
11209
11210 if (tp->irq_cnt == 1)
11211 name = tp->dev->name;
11212 else {
11213 name = &tnapi->irq_lbl[0];
11214 if (tnapi->tx_buffers && tnapi->rx_rcb)
11215 snprintf(name, IFNAMSIZ,
11216 "%s-txrx-%d", tp->dev->name, irq_num);
11217 else if (tnapi->tx_buffers)
11218 snprintf(name, IFNAMSIZ,
11219 "%s-tx-%d", tp->dev->name, irq_num);
11220 else if (tnapi->rx_rcb)
11221 snprintf(name, IFNAMSIZ,
11222 "%s-rx-%d", tp->dev->name, irq_num);
11223 else
11224 snprintf(name, IFNAMSIZ,
11225 "%s-%d", tp->dev->name, irq_num);
11226 name[IFNAMSIZ-1] = 0;
11227 }
11228
11229 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11230 fn = tg3_msi;
11231 if (tg3_flag(tp, 1SHOT_MSI))
11232 fn = tg3_msi_1shot;
11233 flags = 0;
11234 } else {
11235 fn = tg3_interrupt;
11236 if (tg3_flag(tp, TAGGED_STATUS))
11237 fn = tg3_interrupt_tagged;
11238 flags = IRQF_SHARED;
11239 }
11240
11241 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11242 }
11243
11244 static int tg3_test_interrupt(struct tg3 *tp)
11245 {
11246 struct tg3_napi *tnapi = &tp->napi[0];
11247 struct net_device *dev = tp->dev;
11248 int err, i, intr_ok = 0;
11249 u32 val;
11250
11251 if (!netif_running(dev))
11252 return -ENODEV;
11253
11254 tg3_disable_ints(tp);
11255
11256 free_irq(tnapi->irq_vec, tnapi);
11257
11258 /*
11259 * Turn off MSI one shot mode. Otherwise this test has no
11260 * observable way to know whether the interrupt was delivered.
11261 */
11262 if (tg3_flag(tp, 57765_PLUS)) {
11263 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11264 tw32(MSGINT_MODE, val);
11265 }
11266
11267 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11268 IRQF_SHARED, dev->name, tnapi);
11269 if (err)
11270 return err;
11271
11272 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11273 tg3_enable_ints(tp);
11274
11275 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11276 tnapi->coal_now);
11277
11278 for (i = 0; i < 5; i++) {
11279 u32 int_mbox, misc_host_ctrl;
11280
11281 int_mbox = tr32_mailbox(tnapi->int_mbox);
11282 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11283
11284 if ((int_mbox != 0) ||
11285 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11286 intr_ok = 1;
11287 break;
11288 }
11289
11290 if (tg3_flag(tp, 57765_PLUS) &&
11291 tnapi->hw_status->status_tag != tnapi->last_tag)
11292 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11293
11294 msleep(10);
11295 }
11296
11297 tg3_disable_ints(tp);
11298
11299 free_irq(tnapi->irq_vec, tnapi);
11300
11301 err = tg3_request_irq(tp, 0);
11302
11303 if (err)
11304 return err;
11305
11306 if (intr_ok) {
11307 /* Reenable MSI one shot mode. */
11308 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11309 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11310 tw32(MSGINT_MODE, val);
11311 }
11312 return 0;
11313 }
11314
11315 return -EIO;
11316 }
11317
11318 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11319 * INTx mode is successfully restored.
11320 */
11321 static int tg3_test_msi(struct tg3 *tp)
11322 {
11323 int err;
11324 u16 pci_cmd;
11325
11326 if (!tg3_flag(tp, USING_MSI))
11327 return 0;
11328
11329 /* Turn off SERR reporting in case MSI terminates with Master
11330 * Abort.
11331 */
11332 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11333 pci_write_config_word(tp->pdev, PCI_COMMAND,
11334 pci_cmd & ~PCI_COMMAND_SERR);
11335
11336 err = tg3_test_interrupt(tp);
11337
11338 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11339
11340 if (!err)
11341 return 0;
11342
11343 /* other failures */
11344 if (err != -EIO)
11345 return err;
11346
11347 /* MSI test failed, go back to INTx mode */
11348 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11349 "to INTx mode. Please report this failure to the PCI "
11350 "maintainer and include system chipset information\n");
11351
11352 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11353
11354 pci_disable_msi(tp->pdev);
11355
11356 tg3_flag_clear(tp, USING_MSI);
11357 tp->napi[0].irq_vec = tp->pdev->irq;
11358
11359 err = tg3_request_irq(tp, 0);
11360 if (err)
11361 return err;
11362
11363 /* Need to reset the chip because the MSI cycle may have terminated
11364 * with Master Abort.
11365 */
11366 tg3_full_lock(tp, 1);
11367
11368 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11369 err = tg3_init_hw(tp, true);
11370
11371 tg3_full_unlock(tp);
11372
11373 if (err)
11374 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11375
11376 return err;
11377 }
11378
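/* Fetch the firmware blob named by tp->fw_needed and sanity-check the
 * full (BSS-inclusive) length advertised in its header against the
 * size of the data actually received.
 */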
11379 static int tg3_request_firmware(struct tg3 *tp)
11380 {
11381 const struct tg3_firmware_hdr *fw_hdr;
11382
11383 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11384 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11385 tp->fw_needed);
11386 return -ENOENT;
11387 }
11388
11389 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11390
11391 /* Firmware blob starts with version numbers, followed by
11392 * start address and _full_ length including BSS sections
11393 * (which must be at least as long as the actual data, of course).
11394 */
11395
11396 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11397 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11398 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11399 tp->fw_len, tp->fw_needed);
11400 release_firmware(tp->fw);
11401 tp->fw = NULL;
11402 return -EINVAL;
11403 }
11404
11405 /* We no longer need firmware; we have it. */
11406 tp->fw_needed = NULL;
11407 return 0;
11408 }
11409
11410 static u32 tg3_irq_count(struct tg3 *tp)
11411 {
11412 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11413
11414 if (irq_cnt > 1) {
11415 /* We want as many rx rings enabled as there are cpus.
11416 * In multiqueue MSI-X mode, the first MSI-X vector
11417 * only deals with link interrupts, etc, so we add
11418 * one to the number of vectors we are requesting.
11419 */
11420 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11421 }
11422
11423 return irq_cnt;
11424 }
11425
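/* Size the RX/TX queue counts, then try to allocate one MSI-X vector
 * per ring plus one for link interrupts.  If the kernel grants fewer
 * vectors than requested, shrink the ring counts to match rather than
 * failing outright.
 */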
11426 static bool tg3_enable_msix(struct tg3 *tp)
11427 {
11428 int i, rc;
11429 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11430
11431 tp->txq_cnt = tp->txq_req;
11432 tp->rxq_cnt = tp->rxq_req;
11433 if (!tp->rxq_cnt)
11434 tp->rxq_cnt = netif_get_num_default_rss_queues();
11435 if (tp->rxq_cnt > tp->rxq_max)
11436 tp->rxq_cnt = tp->rxq_max;
11437
11438 /* Disable multiple TX rings by default. Simple round-robin hardware
11439 * scheduling of the TX rings can cause starvation of rings with
11440 * small packets when other rings have TSO or jumbo packets.
11441 */
11442 if (!tp->txq_req)
11443 tp->txq_cnt = 1;
11444
11445 tp->irq_cnt = tg3_irq_count(tp);
11446
11447 for (i = 0; i < tp->irq_max; i++) {
11448 msix_ent[i].entry = i;
11449 msix_ent[i].vector = 0;
11450 }
11451
11452 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11453 if (rc < 0) {
11454 return false;
11455 } else if (rc < tp->irq_cnt) {
11456 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11457 tp->irq_cnt, rc);
11458 tp->irq_cnt = rc;
11459 tp->rxq_cnt = max(rc - 1, 1);
11460 if (tp->txq_cnt)
11461 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11462 }
11463
11464 for (i = 0; i < tp->irq_max; i++)
11465 tp->napi[i].irq_vec = msix_ent[i].vector;
11466
11467 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11468 pci_disable_msix(tp->pdev);
11469 return false;
11470 }
11471
11472 if (tp->irq_cnt == 1)
11473 return true;
11474
11475 tg3_flag_set(tp, ENABLE_RSS);
11476
11477 if (tp->txq_cnt > 1)
11478 tg3_flag_set(tp, ENABLE_TSS);
11479
11480 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11481
11482 return true;
11483 }
11484
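/* Pick the interrupt mechanism in order of preference: MSI-X, then
 * MSI, then legacy INTx, and program MSGINT_MODE accordingly.
 */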
11485 static void tg3_ints_init(struct tg3 *tp)
11486 {
11487 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11488 !tg3_flag(tp, TAGGED_STATUS)) {
11489 /* All MSI supporting chips should support tagged
11490 * status. Assert that this is the case.
11491 */
11492 netdev_warn(tp->dev,
11493 "MSI without TAGGED_STATUS? Not using MSI\n");
11494 goto defcfg;
11495 }
11496
11497 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11498 tg3_flag_set(tp, USING_MSIX);
11499 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11500 tg3_flag_set(tp, USING_MSI);
11501
11502 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11503 u32 msi_mode = tr32(MSGINT_MODE);
11504 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11505 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11506 if (!tg3_flag(tp, 1SHOT_MSI))
11507 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11508 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11509 }
11510 defcfg:
11511 if (!tg3_flag(tp, USING_MSIX)) {
11512 tp->irq_cnt = 1;
11513 tp->napi[0].irq_vec = tp->pdev->irq;
11514 }
11515
11516 if (tp->irq_cnt == 1) {
11517 tp->txq_cnt = 1;
11518 tp->rxq_cnt = 1;
11519 netif_set_real_num_tx_queues(tp->dev, 1);
11520 netif_set_real_num_rx_queues(tp->dev, 1);
11521 }
11522 }
11523
11524 static void tg3_ints_fini(struct tg3 *tp)
11525 {
11526 if (tg3_flag(tp, USING_MSIX))
11527 pci_disable_msix(tp->pdev);
11528 else if (tg3_flag(tp, USING_MSI))
11529 pci_disable_msi(tp->pdev);
11530 tg3_flag_clear(tp, USING_MSI);
11531 tg3_flag_clear(tp, USING_MSIX);
11532 tg3_flag_clear(tp, ENABLE_RSS);
11533 tg3_flag_clear(tp, ENABLE_TSS);
11534 }
11535
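/* Bring the device up: set up interrupts, allocate descriptor memory,
 * enable NAPI, request the per-vector IRQs, initialize the hardware,
 * and start the timer and TX queues.  On failure everything is torn
 * down again via the out_* labels.
 */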
11536 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11537 bool init)
11538 {
11539 struct net_device *dev = tp->dev;
11540 int i, err;
11541
11542 /*
11543 * Setup interrupts first so we know how
11544 * many NAPI resources to allocate
11545 */
11546 tg3_ints_init(tp);
11547
11548 tg3_rss_check_indir_tbl(tp);
11549
11550 /* The placement of this call is tied
11551 * to the setup and use of Host TX descriptors.
11552 */
11553 err = tg3_alloc_consistent(tp);
11554 if (err)
11555 goto out_ints_fini;
11556
11557 tg3_napi_init(tp);
11558
11559 tg3_napi_enable(tp);
11560
11561 for (i = 0; i < tp->irq_cnt; i++) {
11562 struct tg3_napi *tnapi = &tp->napi[i];
11563 err = tg3_request_irq(tp, i);
11564 if (err) {
11565 for (i--; i >= 0; i--) {
11566 tnapi = &tp->napi[i];
11567 free_irq(tnapi->irq_vec, tnapi);
11568 }
11569 goto out_napi_fini;
11570 }
11571 }
11572
11573 tg3_full_lock(tp, 0);
11574
11575 if (init)
11576 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11577
11578 err = tg3_init_hw(tp, reset_phy);
11579 if (err) {
11580 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11581 tg3_free_rings(tp);
11582 }
11583
11584 tg3_full_unlock(tp);
11585
11586 if (err)
11587 goto out_free_irq;
11588
11589 if (test_irq && tg3_flag(tp, USING_MSI)) {
11590 err = tg3_test_msi(tp);
11591
11592 if (err) {
11593 tg3_full_lock(tp, 0);
11594 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11595 tg3_free_rings(tp);
11596 tg3_full_unlock(tp);
11597
11598 goto out_napi_fini;
11599 }
11600
11601 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11602 u32 val = tr32(PCIE_TRANSACTION_CFG);
11603
11604 tw32(PCIE_TRANSACTION_CFG,
11605 val | PCIE_TRANS_CFG_1SHOT_MSI);
11606 }
11607 }
11608
11609 tg3_phy_start(tp);
11610
11611 tg3_hwmon_open(tp);
11612
11613 tg3_full_lock(tp, 0);
11614
11615 tg3_timer_start(tp);
11616 tg3_flag_set(tp, INIT_COMPLETE);
11617 tg3_enable_ints(tp);
11618
11619 tg3_ptp_resume(tp);
11620
11621 tg3_full_unlock(tp);
11622
11623 netif_tx_start_all_queues(dev);
11624
11625 /*
11626 * Reset the loopback feature if it was turned on while the device was
11627 * down, to make sure that it is installed properly now.
11628 */
11629 if (dev->features & NETIF_F_LOOPBACK)
11630 tg3_set_loopback(dev, dev->features);
11631
11632 return 0;
11633
11634 out_free_irq:
11635 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11636 struct tg3_napi *tnapi = &tp->napi[i];
11637 free_irq(tnapi->irq_vec, tnapi);
11638 }
11639
11640 out_napi_fini:
11641 tg3_napi_disable(tp);
11642 tg3_napi_fini(tp);
11643 tg3_free_consistent(tp);
11644
11645 out_ints_fini:
11646 tg3_ints_fini(tp);
11647
11648 return err;
11649 }
11650
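/* Tear down everything tg3_start() set up, in the reverse order. */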
11651 static void tg3_stop(struct tg3 *tp)
11652 {
11653 int i;
11654
11655 tg3_reset_task_cancel(tp);
11656 tg3_netif_stop(tp);
11657
11658 tg3_timer_stop(tp);
11659
11660 tg3_hwmon_close(tp);
11661
11662 tg3_phy_stop(tp);
11663
11664 tg3_full_lock(tp, 1);
11665
11666 tg3_disable_ints(tp);
11667
11668 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11669 tg3_free_rings(tp);
11670 tg3_flag_clear(tp, INIT_COMPLETE);
11671
11672 tg3_full_unlock(tp);
11673
11674 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11675 struct tg3_napi *tnapi = &tp->napi[i];
11676 free_irq(tnapi->irq_vec, tnapi);
11677 }
11678
11679 tg3_ints_fini(tp);
11680
11681 tg3_napi_fini(tp);
11682
11683 tg3_free_consistent(tp);
11684 }
11685
11686 static int tg3_open(struct net_device *dev)
11687 {
11688 struct tg3 *tp = netdev_priv(dev);
11689 int err;
11690
11691 if (tp->pcierr_recovery) {
11692 netdev_err(dev, "Failed to open device. PCI error recovery "
11693 "in progress\n");
11694 return -EAGAIN;
11695 }
11696
11697 if (tp->fw_needed) {
11698 err = tg3_request_firmware(tp);
11699 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11700 if (err) {
11701 netdev_warn(tp->dev, "EEE capability disabled\n");
11702 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11703 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11704 netdev_warn(tp->dev, "EEE capability restored\n");
11705 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11706 }
11707 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11708 if (err)
11709 return err;
11710 } else if (err) {
11711 netdev_warn(tp->dev, "TSO capability disabled\n");
11712 tg3_flag_clear(tp, TSO_CAPABLE);
11713 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11714 netdev_notice(tp->dev, "TSO capability restored\n");
11715 tg3_flag_set(tp, TSO_CAPABLE);
11716 }
11717 }
11718
11719 tg3_carrier_off(tp);
11720
11721 err = tg3_power_up(tp);
11722 if (err)
11723 return err;
11724
11725 tg3_full_lock(tp, 0);
11726
11727 tg3_disable_ints(tp);
11728 tg3_flag_clear(tp, INIT_COMPLETE);
11729
11730 tg3_full_unlock(tp);
11731
11732 err = tg3_start(tp,
11733 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11734 true, true);
11735 if (err) {
11736 tg3_frob_aux_power(tp, false);
11737 pci_set_power_state(tp->pdev, PCI_D3hot);
11738 }
11739
11740 return err;
11741 }
11742
11743 static int tg3_close(struct net_device *dev)
11744 {
11745 struct tg3 *tp = netdev_priv(dev);
11746
11747 if (tp->pcierr_recovery) {
11748 netdev_err(dev, "Failed to close device. PCI error recovery "
11749 "in progress\n");
11750 return -EAGAIN;
11751 }
11752
11753 tg3_stop(tp);
11754
11755 /* Clear stats across close / open calls */
11756 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11757 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11758
11759 if (pci_device_is_present(tp->pdev)) {
11760 tg3_power_down_prepare(tp);
11761
11762 tg3_carrier_off(tp);
11763 }
11764 return 0;
11765 }
11766
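/* Hardware statistics are kept as two 32-bit halves; fold them into a
 * single u64.
 */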
11767 static inline u64 get_stat64(tg3_stat64_t *val)
11768 {
11769 return ((u64)val->high << 32) | ((u64)val->low);
11770 }
11771
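/* On 5700/5701 copper devices the PHY maintains the CRC error count,
 * which is read and accumulated into tp->phy_crc_errors; all other
 * devices report CRC errors via the MAC rx_fcs_errors counter.
 */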
11772 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11773 {
11774 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11775
11776 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11777 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11778 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11779 u32 val;
11780
11781 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11782 tg3_writephy(tp, MII_TG3_TEST1,
11783 val | MII_TG3_TEST1_CRC_EN);
11784 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11785 } else
11786 val = 0;
11787
11788 tp->phy_crc_errors += val;
11789
11790 return tp->phy_crc_errors;
11791 }
11792
11793 return get_stat64(&hw_stats->rx_fcs_errors);
11794 }
11795
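/* Accumulate a live hardware counter on top of the value saved in
 * estats_prev across the last chip reset.
 */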
11796 #define ESTAT_ADD(member) \
11797 estats->member = old_estats->member + \
11798 get_stat64(&hw_stats->member)
11799
11800 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11801 {
11802 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11803 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11804
11805 ESTAT_ADD(rx_octets);
11806 ESTAT_ADD(rx_fragments);
11807 ESTAT_ADD(rx_ucast_packets);
11808 ESTAT_ADD(rx_mcast_packets);
11809 ESTAT_ADD(rx_bcast_packets);
11810 ESTAT_ADD(rx_fcs_errors);
11811 ESTAT_ADD(rx_align_errors);
11812 ESTAT_ADD(rx_xon_pause_rcvd);
11813 ESTAT_ADD(rx_xoff_pause_rcvd);
11814 ESTAT_ADD(rx_mac_ctrl_rcvd);
11815 ESTAT_ADD(rx_xoff_entered);
11816 ESTAT_ADD(rx_frame_too_long_errors);
11817 ESTAT_ADD(rx_jabbers);
11818 ESTAT_ADD(rx_undersize_packets);
11819 ESTAT_ADD(rx_in_length_errors);
11820 ESTAT_ADD(rx_out_length_errors);
11821 ESTAT_ADD(rx_64_or_less_octet_packets);
11822 ESTAT_ADD(rx_65_to_127_octet_packets);
11823 ESTAT_ADD(rx_128_to_255_octet_packets);
11824 ESTAT_ADD(rx_256_to_511_octet_packets);
11825 ESTAT_ADD(rx_512_to_1023_octet_packets);
11826 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11827 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11828 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11829 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11830 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11831
11832 ESTAT_ADD(tx_octets);
11833 ESTAT_ADD(tx_collisions);
11834 ESTAT_ADD(tx_xon_sent);
11835 ESTAT_ADD(tx_xoff_sent);
11836 ESTAT_ADD(tx_flow_control);
11837 ESTAT_ADD(tx_mac_errors);
11838 ESTAT_ADD(tx_single_collisions);
11839 ESTAT_ADD(tx_mult_collisions);
11840 ESTAT_ADD(tx_deferred);
11841 ESTAT_ADD(tx_excessive_collisions);
11842 ESTAT_ADD(tx_late_collisions);
11843 ESTAT_ADD(tx_collide_2times);
11844 ESTAT_ADD(tx_collide_3times);
11845 ESTAT_ADD(tx_collide_4times);
11846 ESTAT_ADD(tx_collide_5times);
11847 ESTAT_ADD(tx_collide_6times);
11848 ESTAT_ADD(tx_collide_7times);
11849 ESTAT_ADD(tx_collide_8times);
11850 ESTAT_ADD(tx_collide_9times);
11851 ESTAT_ADD(tx_collide_10times);
11852 ESTAT_ADD(tx_collide_11times);
11853 ESTAT_ADD(tx_collide_12times);
11854 ESTAT_ADD(tx_collide_13times);
11855 ESTAT_ADD(tx_collide_14times);
11856 ESTAT_ADD(tx_collide_15times);
11857 ESTAT_ADD(tx_ucast_packets);
11858 ESTAT_ADD(tx_mcast_packets);
11859 ESTAT_ADD(tx_bcast_packets);
11860 ESTAT_ADD(tx_carrier_sense_errors);
11861 ESTAT_ADD(tx_discards);
11862 ESTAT_ADD(tx_errors);
11863
11864 ESTAT_ADD(dma_writeq_full);
11865 ESTAT_ADD(dma_write_prioq_full);
11866 ESTAT_ADD(rxbds_empty);
11867 ESTAT_ADD(rx_discards);
11868 ESTAT_ADD(rx_errors);
11869 ESTAT_ADD(rx_threshold_hit);
11870
11871 ESTAT_ADD(dma_readq_full);
11872 ESTAT_ADD(dma_read_prioq_full);
11873 ESTAT_ADD(tx_comp_queue_full);
11874
11875 ESTAT_ADD(ring_set_send_prod_index);
11876 ESTAT_ADD(ring_status_update);
11877 ESTAT_ADD(nic_irqs);
11878 ESTAT_ADD(nic_avoided_irqs);
11879 ESTAT_ADD(nic_tx_threshold_hit);
11880
11881 ESTAT_ADD(mbuf_lwm_thresh_hit);
11882 }
11883
11884 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11885 {
11886 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11887 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11888
11889 stats->rx_packets = old_stats->rx_packets +
11890 get_stat64(&hw_stats->rx_ucast_packets) +
11891 get_stat64(&hw_stats->rx_mcast_packets) +
11892 get_stat64(&hw_stats->rx_bcast_packets);
11893
11894 stats->tx_packets = old_stats->tx_packets +
11895 get_stat64(&hw_stats->tx_ucast_packets) +
11896 get_stat64(&hw_stats->tx_mcast_packets) +
11897 get_stat64(&hw_stats->tx_bcast_packets);
11898
11899 stats->rx_bytes = old_stats->rx_bytes +
11900 get_stat64(&hw_stats->rx_octets);
11901 stats->tx_bytes = old_stats->tx_bytes +
11902 get_stat64(&hw_stats->tx_octets);
11903
11904 stats->rx_errors = old_stats->rx_errors +
11905 get_stat64(&hw_stats->rx_errors);
11906 stats->tx_errors = old_stats->tx_errors +
11907 get_stat64(&hw_stats->tx_errors) +
11908 get_stat64(&hw_stats->tx_mac_errors) +
11909 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11910 get_stat64(&hw_stats->tx_discards);
11911
11912 stats->multicast = old_stats->multicast +
11913 get_stat64(&hw_stats->rx_mcast_packets);
11914 stats->collisions = old_stats->collisions +
11915 get_stat64(&hw_stats->tx_collisions);
11916
11917 stats->rx_length_errors = old_stats->rx_length_errors +
11918 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11919 get_stat64(&hw_stats->rx_undersize_packets);
11920
11921 stats->rx_frame_errors = old_stats->rx_frame_errors +
11922 get_stat64(&hw_stats->rx_align_errors);
11923 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11924 get_stat64(&hw_stats->tx_discards);
11925 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11926 get_stat64(&hw_stats->tx_carrier_sense_errors);
11927
11928 stats->rx_crc_errors = old_stats->rx_crc_errors +
11929 tg3_calc_crc_errors(tp);
11930
11931 stats->rx_missed_errors = old_stats->rx_missed_errors +
11932 get_stat64(&hw_stats->rx_discards);
11933
11934 stats->rx_dropped = tp->rx_dropped;
11935 stats->tx_dropped = tp->tx_dropped;
11936 }
11937
11938 static int tg3_get_regs_len(struct net_device *dev)
11939 {
11940 return TG3_REG_BLK_SIZE;
11941 }
11942
11943 static void tg3_get_regs(struct net_device *dev,
11944 struct ethtool_regs *regs, void *_p)
11945 {
11946 struct tg3 *tp = netdev_priv(dev);
11947
11948 regs->version = 0;
11949
11950 memset(_p, 0, TG3_REG_BLK_SIZE);
11951
11952 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11953 return;
11954
11955 tg3_full_lock(tp, 0);
11956
11957 tg3_dump_legacy_regs(tp, (u32 *)_p);
11958
11959 tg3_full_unlock(tp);
11960 }
11961
11962 static int tg3_get_eeprom_len(struct net_device *dev)
11963 {
11964 struct tg3 *tp = netdev_priv(dev);
11965
11966 return tp->nvram_size;
11967 }
11968
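/* Read NVRAM for the ethtool EEPROM interface.  An unaligned head, the
 * 4-byte-aligned body, and an unaligned tail are handled as three
 * separate phases.  For example (interface name hypothetical),
 * "ethtool -e eth0 offset 1 length 2" exercises only the head phase,
 * with b_offset = 1 and b_count = 2.
 */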
11969 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11970 {
11971 struct tg3 *tp = netdev_priv(dev);
11972 int ret, cpmu_restore = 0;
11973 u8 *pd;
11974 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11975 __be32 val;
11976
11977 if (tg3_flag(tp, NO_NVRAM))
11978 return -EINVAL;
11979
11980 offset = eeprom->offset;
11981 len = eeprom->len;
11982 eeprom->len = 0;
11983
11984 eeprom->magic = TG3_EEPROM_MAGIC;
11985
11986 /* Override clock, link aware and link idle modes */
11987 if (tg3_flag(tp, CPMU_PRESENT)) {
11988 cpmu_val = tr32(TG3_CPMU_CTRL);
11989 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11990 CPMU_CTRL_LINK_IDLE_MODE)) {
11991 tw32(TG3_CPMU_CTRL, cpmu_val &
11992 ~(CPMU_CTRL_LINK_AWARE_MODE |
11993 CPMU_CTRL_LINK_IDLE_MODE));
11994 cpmu_restore = 1;
11995 }
11996 }
11997 tg3_override_clk(tp);
11998
11999 if (offset & 3) {
12000 /* adjustments to start on required 4 byte boundary */
12001 b_offset = offset & 3;
12002 b_count = 4 - b_offset;
12003 if (b_count > len) {
12004 /* i.e. offset=1 len=2 */
12005 b_count = len;
12006 }
12007 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12008 if (ret)
12009 goto eeprom_done;
12010 memcpy(data, ((char *)&val) + b_offset, b_count);
12011 len -= b_count;
12012 offset += b_count;
12013 eeprom->len += b_count;
12014 }
12015
12016 /* read bytes up to the last 4 byte boundary */
12017 pd = &data[eeprom->len];
12018 for (i = 0; i < (len - (len & 3)); i += 4) {
12019 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12020 if (ret) {
12021 if (i)
12022 i -= 4;
12023 eeprom->len += i;
12024 goto eeprom_done;
12025 }
12026 memcpy(pd + i, &val, 4);
12027 if (need_resched()) {
12028 if (signal_pending(current)) {
12029 eeprom->len += i;
12030 ret = -EINTR;
12031 goto eeprom_done;
12032 }
12033 cond_resched();
12034 }
12035 }
12036 eeprom->len += i;
12037
12038 if (len & 3) {
12039 /* read last bytes not ending on 4 byte boundary */
12040 pd = &data[eeprom->len];
12041 b_count = len & 3;
12042 b_offset = offset + len - b_count;
12043 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12044 if (ret)
12045 goto eeprom_done;
12046 memcpy(pd, &val, b_count);
12047 eeprom->len += b_count;
12048 }
12049 ret = 0;
12050
12051 eeprom_done:
12052 /* Restore clock, link aware and link idle modes */
12053 tg3_restore_clk(tp);
12054 if (cpmu_restore)
12055 tw32(TG3_CPMU_CTRL, cpmu_val);
12056
12057 return ret;
12058 }
12059
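/* Write NVRAM for the ethtool EEPROM interface.  Unaligned starts and
 * ends are widened to 4-byte boundaries by first reading back the
 * neighbouring words into a bounce buffer.
 */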
12060 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12061 {
12062 struct tg3 *tp = netdev_priv(dev);
12063 int ret;
12064 u32 offset, len, b_offset, odd_len;
12065 u8 *buf;
12066 __be32 start = 0, end;
12067
12068 if (tg3_flag(tp, NO_NVRAM) ||
12069 eeprom->magic != TG3_EEPROM_MAGIC)
12070 return -EINVAL;
12071
12072 offset = eeprom->offset;
12073 len = eeprom->len;
12074
12075 if ((b_offset = (offset & 3))) {
12076 /* adjustments to start on required 4 byte boundary */
12077 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12078 if (ret)
12079 return ret;
12080 len += b_offset;
12081 offset &= ~3;
12082 if (len < 4)
12083 len = 4;
12084 }
12085
12086 odd_len = 0;
12087 if (len & 3) {
12088 /* adjustments to end on required 4 byte boundary */
12089 odd_len = 1;
12090 len = (len + 3) & ~3;
12091 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12092 if (ret)
12093 return ret;
12094 }
12095
12096 buf = data;
12097 if (b_offset || odd_len) {
12098 buf = kmalloc(len, GFP_KERNEL);
12099 if (!buf)
12100 return -ENOMEM;
12101 if (b_offset)
12102 memcpy(buf, &start, 4);
12103 if (odd_len)
12104 memcpy(buf+len-4, &end, 4);
12105 memcpy(buf + b_offset, data, eeprom->len);
12106 }
12107
12108 ret = tg3_nvram_write_block(tp, offset, len, buf);
12109
12110 if (buf != data)
12111 kfree(buf);
12112
12113 return ret;
12114 }
12115
12116 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12117 {
12118 struct tg3 *tp = netdev_priv(dev);
12119
12120 if (tg3_flag(tp, USE_PHYLIB)) {
12121 struct phy_device *phydev;
12122 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12123 return -EAGAIN;
12124 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12125 return phy_ethtool_gset(phydev, cmd);
12126 }
12127
12128 cmd->supported = (SUPPORTED_Autoneg);
12129
12130 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12131 cmd->supported |= (SUPPORTED_1000baseT_Half |
12132 SUPPORTED_1000baseT_Full);
12133
12134 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12135 cmd->supported |= (SUPPORTED_100baseT_Half |
12136 SUPPORTED_100baseT_Full |
12137 SUPPORTED_10baseT_Half |
12138 SUPPORTED_10baseT_Full |
12139 SUPPORTED_TP);
12140 cmd->port = PORT_TP;
12141 } else {
12142 cmd->supported |= SUPPORTED_FIBRE;
12143 cmd->port = PORT_FIBRE;
12144 }
12145
12146 cmd->advertising = tp->link_config.advertising;
12147 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12148 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12149 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12150 cmd->advertising |= ADVERTISED_Pause;
12151 } else {
12152 cmd->advertising |= ADVERTISED_Pause |
12153 ADVERTISED_Asym_Pause;
12154 }
12155 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12156 cmd->advertising |= ADVERTISED_Asym_Pause;
12157 }
12158 }
12159 if (netif_running(dev) && tp->link_up) {
12160 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12161 cmd->duplex = tp->link_config.active_duplex;
12162 cmd->lp_advertising = tp->link_config.rmt_adv;
12163 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12164 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12165 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12166 else
12167 cmd->eth_tp_mdix = ETH_TP_MDI;
12168 }
12169 } else {
12170 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12171 cmd->duplex = DUPLEX_UNKNOWN;
12172 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12173 }
12174 cmd->phy_address = tp->phy_addr;
12175 cmd->transceiver = XCVR_INTERNAL;
12176 cmd->autoneg = tp->link_config.autoneg;
12177 cmd->maxtxpkt = 0;
12178 cmd->maxrxpkt = 0;
12179 return 0;
12180 }
12181
12182 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12183 {
12184 struct tg3 *tp = netdev_priv(dev);
12185 u32 speed = ethtool_cmd_speed(cmd);
12186
12187 if (tg3_flag(tp, USE_PHYLIB)) {
12188 struct phy_device *phydev;
12189 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12190 return -EAGAIN;
12191 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12192 return phy_ethtool_sset(phydev, cmd);
12193 }
12194
12195 if (cmd->autoneg != AUTONEG_ENABLE &&
12196 cmd->autoneg != AUTONEG_DISABLE)
12197 return -EINVAL;
12198
12199 if (cmd->autoneg == AUTONEG_DISABLE &&
12200 cmd->duplex != DUPLEX_FULL &&
12201 cmd->duplex != DUPLEX_HALF)
12202 return -EINVAL;
12203
12204 if (cmd->autoneg == AUTONEG_ENABLE) {
12205 u32 mask = ADVERTISED_Autoneg |
12206 ADVERTISED_Pause |
12207 ADVERTISED_Asym_Pause;
12208
12209 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12210 mask |= ADVERTISED_1000baseT_Half |
12211 ADVERTISED_1000baseT_Full;
12212
12213 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12214 mask |= ADVERTISED_100baseT_Half |
12215 ADVERTISED_100baseT_Full |
12216 ADVERTISED_10baseT_Half |
12217 ADVERTISED_10baseT_Full |
12218 ADVERTISED_TP;
12219 else
12220 mask |= ADVERTISED_FIBRE;
12221
12222 if (cmd->advertising & ~mask)
12223 return -EINVAL;
12224
12225 mask &= (ADVERTISED_1000baseT_Half |
12226 ADVERTISED_1000baseT_Full |
12227 ADVERTISED_100baseT_Half |
12228 ADVERTISED_100baseT_Full |
12229 ADVERTISED_10baseT_Half |
12230 ADVERTISED_10baseT_Full);
12231
12232 cmd->advertising &= mask;
12233 } else {
12234 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12235 if (speed != SPEED_1000)
12236 return -EINVAL;
12237
12238 if (cmd->duplex != DUPLEX_FULL)
12239 return -EINVAL;
12240 } else {
12241 if (speed != SPEED_100 &&
12242 speed != SPEED_10)
12243 return -EINVAL;
12244 }
12245 }
12246
12247 tg3_full_lock(tp, 0);
12248
12249 tp->link_config.autoneg = cmd->autoneg;
12250 if (cmd->autoneg == AUTONEG_ENABLE) {
12251 tp->link_config.advertising = (cmd->advertising |
12252 ADVERTISED_Autoneg);
12253 tp->link_config.speed = SPEED_UNKNOWN;
12254 tp->link_config.duplex = DUPLEX_UNKNOWN;
12255 } else {
12256 tp->link_config.advertising = 0;
12257 tp->link_config.speed = speed;
12258 tp->link_config.duplex = cmd->duplex;
12259 }
12260
12261 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12262
12263 tg3_warn_mgmt_link_flap(tp);
12264
12265 if (netif_running(dev))
12266 tg3_setup_phy(tp, true);
12267
12268 tg3_full_unlock(tp);
12269
12270 return 0;
12271 }
12272
12273 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12274 {
12275 struct tg3 *tp = netdev_priv(dev);
12276
12277 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12278 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12279 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12280 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12281 }
12282
12283 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12284 {
12285 struct tg3 *tp = netdev_priv(dev);
12286
12287 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12288 wol->supported = WAKE_MAGIC;
12289 else
12290 wol->supported = 0;
12291 wol->wolopts = 0;
12292 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12293 wol->wolopts = WAKE_MAGIC;
12294 memset(&wol->sopass, 0, sizeof(wol->sopass));
12295 }
12296
12297 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12298 {
12299 struct tg3 *tp = netdev_priv(dev);
12300 struct device *dp = &tp->pdev->dev;
12301
12302 if (wol->wolopts & ~WAKE_MAGIC)
12303 return -EINVAL;
12304 if ((wol->wolopts & WAKE_MAGIC) &&
12305 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12306 return -EINVAL;
12307
12308 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12309
12310 if (device_may_wakeup(dp))
12311 tg3_flag_set(tp, WOL_ENABLE);
12312 else
12313 tg3_flag_clear(tp, WOL_ENABLE);
12314
12315 return 0;
12316 }
12317
12318 static u32 tg3_get_msglevel(struct net_device *dev)
12319 {
12320 struct tg3 *tp = netdev_priv(dev);
12321 return tp->msg_enable;
12322 }
12323
12324 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12325 {
12326 struct tg3 *tp = netdev_priv(dev);
12327 tp->msg_enable = value;
12328 }
12329
12330 static int tg3_nway_reset(struct net_device *dev)
12331 {
12332 struct tg3 *tp = netdev_priv(dev);
12333 int r;
12334
12335 if (!netif_running(dev))
12336 return -EAGAIN;
12337
12338 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12339 return -EINVAL;
12340
12341 tg3_warn_mgmt_link_flap(tp);
12342
12343 if (tg3_flag(tp, USE_PHYLIB)) {
12344 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12345 return -EAGAIN;
12346 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12347 } else {
12348 u32 bmcr;
12349
12350 spin_lock_bh(&tp->lock);
12351 r = -EINVAL;
12352 tg3_readphy(tp, MII_BMCR, &bmcr);
12353 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12354 ((bmcr & BMCR_ANENABLE) ||
12355 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12356 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12357 BMCR_ANENABLE);
12358 r = 0;
12359 }
12360 spin_unlock_bh(&tp->lock);
12361 }
12362
12363 return r;
12364 }
12365
12366 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12367 {
12368 struct tg3 *tp = netdev_priv(dev);
12369
12370 ering->rx_max_pending = tp->rx_std_ring_mask;
12371 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12372 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12373 else
12374 ering->rx_jumbo_max_pending = 0;
12375
12376 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12377
12378 ering->rx_pending = tp->rx_pending;
12379 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12380 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12381 else
12382 ering->rx_jumbo_pending = 0;
12383
12384 ering->tx_pending = tp->napi[0].tx_pending;
12385 }
12386
12387 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12388 {
12389 struct tg3 *tp = netdev_priv(dev);
12390 int i, irq_sync = 0, err = 0;
12391 bool reset_phy = false;
12392
12393 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12394 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12395 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12396 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12397 (tg3_flag(tp, TSO_BUG) &&
12398 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12399 return -EINVAL;
12400
12401 if (netif_running(dev)) {
12402 tg3_phy_stop(tp);
12403 tg3_netif_stop(tp);
12404 irq_sync = 1;
12405 }
12406
12407 tg3_full_lock(tp, irq_sync);
12408
12409 tp->rx_pending = ering->rx_pending;
12410
12411 if (tg3_flag(tp, MAX_RXPEND_64) &&
12412 tp->rx_pending > 63)
12413 tp->rx_pending = 63;
12414
12415 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12416 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12417
12418 for (i = 0; i < tp->irq_max; i++)
12419 tp->napi[i].tx_pending = ering->tx_pending;
12420
12421 if (netif_running(dev)) {
12422 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12423 /* Reset PHY to avoid PHY lock up */
12424 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12425 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12426 tg3_asic_rev(tp) == ASIC_REV_5720)
12427 reset_phy = true;
12428
12429 err = tg3_restart_hw(tp, reset_phy);
12430 if (!err)
12431 tg3_netif_start(tp);
12432 }
12433
12434 tg3_full_unlock(tp);
12435
12436 if (irq_sync && !err)
12437 tg3_phy_start(tp);
12438
12439 return err;
12440 }
12441
12442 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12443 {
12444 struct tg3 *tp = netdev_priv(dev);
12445
12446 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12447
12448 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12449 epause->rx_pause = 1;
12450 else
12451 epause->rx_pause = 0;
12452
12453 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12454 epause->tx_pause = 1;
12455 else
12456 epause->tx_pause = 0;
12457 }
12458
12459 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12460 {
12461 struct tg3 *tp = netdev_priv(dev);
12462 int err = 0;
12463 bool reset_phy = false;
12464
12465 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12466 tg3_warn_mgmt_link_flap(tp);
12467
12468 if (tg3_flag(tp, USE_PHYLIB)) {
12469 u32 newadv;
12470 struct phy_device *phydev;
12471
12472 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12473
12474 if (!(phydev->supported & SUPPORTED_Pause) ||
12475 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12476 (epause->rx_pause != epause->tx_pause)))
12477 return -EINVAL;
12478
12479 tp->link_config.flowctrl = 0;
12480 if (epause->rx_pause) {
12481 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12482
12483 if (epause->tx_pause) {
12484 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12485 newadv = ADVERTISED_Pause;
12486 } else
12487 newadv = ADVERTISED_Pause |
12488 ADVERTISED_Asym_Pause;
12489 } else if (epause->tx_pause) {
12490 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12491 newadv = ADVERTISED_Asym_Pause;
12492 } else
12493 newadv = 0;
12494
12495 if (epause->autoneg)
12496 tg3_flag_set(tp, PAUSE_AUTONEG);
12497 else
12498 tg3_flag_clear(tp, PAUSE_AUTONEG);
12499
12500 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12501 u32 oldadv = phydev->advertising &
12502 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12503 if (oldadv != newadv) {
12504 phydev->advertising &=
12505 ~(ADVERTISED_Pause |
12506 ADVERTISED_Asym_Pause);
12507 phydev->advertising |= newadv;
12508 if (phydev->autoneg) {
12509 /*
12510 * Always renegotiate the link to
12511 * inform our link partner of our
12512 * flow control settings, even if the
12513 * flow control is forced. Let
12514 * tg3_adjust_link() do the final
12515 * flow control setup.
12516 */
12517 return phy_start_aneg(phydev);
12518 }
12519 }
12520
12521 if (!epause->autoneg)
12522 tg3_setup_flow_control(tp, 0, 0);
12523 } else {
12524 tp->link_config.advertising &=
12525 ~(ADVERTISED_Pause |
12526 ADVERTISED_Asym_Pause);
12527 tp->link_config.advertising |= newadv;
12528 }
12529 } else {
12530 int irq_sync = 0;
12531
12532 if (netif_running(dev)) {
12533 tg3_netif_stop(tp);
12534 irq_sync = 1;
12535 }
12536
12537 tg3_full_lock(tp, irq_sync);
12538
12539 if (epause->autoneg)
12540 tg3_flag_set(tp, PAUSE_AUTONEG);
12541 else
12542 tg3_flag_clear(tp, PAUSE_AUTONEG);
12543 if (epause->rx_pause)
12544 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12545 else
12546 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12547 if (epause->tx_pause)
12548 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12549 else
12550 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12551
12552 if (netif_running(dev)) {
12553 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12554 /* Reset PHY to avoid PHY lock up */
12555 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12556 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12557 tg3_asic_rev(tp) == ASIC_REV_5720)
12558 reset_phy = true;
12559
12560 err = tg3_restart_hw(tp, reset_phy);
12561 if (!err)
12562 tg3_netif_start(tp);
12563 }
12564
12565 tg3_full_unlock(tp);
12566 }
12567
12568 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12569
12570 return err;
12571 }
12572
12573 static int tg3_get_sset_count(struct net_device *dev, int sset)
12574 {
12575 switch (sset) {
12576 case ETH_SS_TEST:
12577 return TG3_NUM_TEST;
12578 case ETH_SS_STATS:
12579 return TG3_NUM_STATS;
12580 default:
12581 return -EOPNOTSUPP;
12582 }
12583 }
12584
12585 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12586 u32 *rules __always_unused)
12587 {
12588 struct tg3 *tp = netdev_priv(dev);
12589
12590 if (!tg3_flag(tp, SUPPORT_MSIX))
12591 return -EOPNOTSUPP;
12592
12593 switch (info->cmd) {
12594 case ETHTOOL_GRXRINGS:
12595 if (netif_running(tp->dev))
12596 info->data = tp->rxq_cnt;
12597 else {
12598 info->data = num_online_cpus();
12599 if (info->data > TG3_RSS_MAX_NUM_QS)
12600 info->data = TG3_RSS_MAX_NUM_QS;
12601 }
12602
12603 /* The first interrupt vector only
12604 * handles link interrupts.
12605 */
12606 info->data -= 1;
12607 return 0;
12608
12609 default:
12610 return -EOPNOTSUPP;
12611 }
12612 }
12613
12614 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12615 {
12616 u32 size = 0;
12617 struct tg3 *tp = netdev_priv(dev);
12618
12619 if (tg3_flag(tp, SUPPORT_MSIX))
12620 size = TG3_RSS_INDIR_TBL_SIZE;
12621
12622 return size;
12623 }
12624
12625 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12626 {
12627 struct tg3 *tp = netdev_priv(dev);
12628 int i;
12629
12630 if (hfunc)
12631 *hfunc = ETH_RSS_HASH_TOP;
12632 if (!indir)
12633 return 0;
12634
12635 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12636 indir[i] = tp->rss_ind_tbl[i];
12637
12638 return 0;
12639 }
12640
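/* ethtool -X handler: only indirection-table updates are supported; a
 * new hash key, or any hash function other than Toeplitz, is rejected
 * with -EOPNOTSUPP.
 */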
12641 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12642 const u8 hfunc)
12643 {
12644 struct tg3 *tp = netdev_priv(dev);
12645 size_t i;
12646
12647 /* We require at least one supported parameter to be changed and no
12648 * change in any of the unsupported parameters
12649 */
12650 if (key ||
12651 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12652 return -EOPNOTSUPP;
12653
12654 if (!indir)
12655 return 0;
12656
12657 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12658 tp->rss_ind_tbl[i] = indir[i];
12659
12660 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12661 return 0;
12662
12663 /* It is legal to write the indirection
12664 * table while the device is running.
12665 */
12666 tg3_full_lock(tp, 0);
12667 tg3_rss_write_indir_tbl(tp);
12668 tg3_full_unlock(tp);
12669
12670 return 0;
12671 }
12672
12673 static void tg3_get_channels(struct net_device *dev,
12674 struct ethtool_channels *channel)
12675 {
12676 struct tg3 *tp = netdev_priv(dev);
12677 u32 deflt_qs = netif_get_num_default_rss_queues();
12678
12679 channel->max_rx = tp->rxq_max;
12680 channel->max_tx = tp->txq_max;
12681
12682 if (netif_running(dev)) {
12683 channel->rx_count = tp->rxq_cnt;
12684 channel->tx_count = tp->txq_cnt;
12685 } else {
12686 if (tp->rxq_req)
12687 channel->rx_count = tp->rxq_req;
12688 else
12689 channel->rx_count = min(deflt_qs, tp->rxq_max);
12690
12691 if (tp->txq_req)
12692 channel->tx_count = tp->txq_req;
12693 else
12694 channel->tx_count = min(deflt_qs, tp->txq_max);
12695 }
12696 }
12697
12698 static int tg3_set_channels(struct net_device *dev,
12699 struct ethtool_channels *channel)
12700 {
12701 struct tg3 *tp = netdev_priv(dev);
12702
12703 if (!tg3_flag(tp, SUPPORT_MSIX))
12704 return -EOPNOTSUPP;
12705
12706 if (channel->rx_count > tp->rxq_max ||
12707 channel->tx_count > tp->txq_max)
12708 return -EINVAL;
12709
12710 tp->rxq_req = channel->rx_count;
12711 tp->txq_req = channel->tx_count;
12712
12713 if (!netif_running(dev))
12714 return 0;
12715
12716 tg3_stop(tp);
12717
12718 tg3_carrier_off(tp);
12719
12720 tg3_start(tp, true, false, false);
12721
12722 return 0;
12723 }
12724
12725 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12726 {
12727 switch (stringset) {
12728 case ETH_SS_STATS:
12729 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12730 break;
12731 case ETH_SS_TEST:
12732 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12733 break;
12734 default:
12735 WARN_ON(1); /* we need a WARN() */
12736 break;
12737 }
12738 }
12739
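/* LED identify support (e.g. "ethtool -p <iface>"; interface name up
 * to the caller).  Override the LED controls so the link LED blinks
 * once per second until ethtool signals ETHTOOL_ID_INACTIVE.
 */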
12740 static int tg3_set_phys_id(struct net_device *dev,
12741 enum ethtool_phys_id_state state)
12742 {
12743 struct tg3 *tp = netdev_priv(dev);
12744
12745 if (!netif_running(tp->dev))
12746 return -EAGAIN;
12747
12748 switch (state) {
12749 case ETHTOOL_ID_ACTIVE:
12750 return 1; /* cycle on/off once per second */
12751
12752 case ETHTOOL_ID_ON:
12753 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12754 LED_CTRL_1000MBPS_ON |
12755 LED_CTRL_100MBPS_ON |
12756 LED_CTRL_10MBPS_ON |
12757 LED_CTRL_TRAFFIC_OVERRIDE |
12758 LED_CTRL_TRAFFIC_BLINK |
12759 LED_CTRL_TRAFFIC_LED);
12760 break;
12761
12762 case ETHTOOL_ID_OFF:
12763 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12764 LED_CTRL_TRAFFIC_OVERRIDE);
12765 break;
12766
12767 case ETHTOOL_ID_INACTIVE:
12768 tw32(MAC_LED_CTRL, tp->led_ctrl);
12769 break;
12770 }
12771
12772 return 0;
12773 }
12774
12775 static void tg3_get_ethtool_stats(struct net_device *dev,
12776 struct ethtool_stats *estats, u64 *tmp_stats)
12777 {
12778 struct tg3 *tp = netdev_priv(dev);
12779
12780 if (tp->hw_stats)
12781 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12782 else
12783 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12784 }
12785
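/* Return a kmalloc()ed copy of the VPD block and its length via
 * *vpdlen, read either from NVRAM or through PCI config-space VPD
 * accesses.  The caller must kfree() the result; NULL means failure.
 */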
12786 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12787 {
12788 int i;
12789 __be32 *buf;
12790 u32 offset = 0, len = 0;
12791 u32 magic, val;
12792
12793 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12794 return NULL;
12795
12796 if (magic == TG3_EEPROM_MAGIC) {
12797 for (offset = TG3_NVM_DIR_START;
12798 offset < TG3_NVM_DIR_END;
12799 offset += TG3_NVM_DIRENT_SIZE) {
12800 if (tg3_nvram_read(tp, offset, &val))
12801 return NULL;
12802
12803 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12804 TG3_NVM_DIRTYPE_EXTVPD)
12805 break;
12806 }
12807
12808 if (offset != TG3_NVM_DIR_END) {
12809 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12810 if (tg3_nvram_read(tp, offset + 4, &offset))
12811 return NULL;
12812
12813 offset = tg3_nvram_logical_addr(tp, offset);
12814 }
12815 }
12816
12817 if (!offset || !len) {
12818 offset = TG3_NVM_VPD_OFF;
12819 len = TG3_NVM_VPD_LEN;
12820 }
12821
12822 buf = kmalloc(len, GFP_KERNEL);
12823 if (buf == NULL)
12824 return NULL;
12825
12826 if (magic == TG3_EEPROM_MAGIC) {
12827 for (i = 0; i < len; i += 4) {
12828 /* The data is in little-endian format in NVRAM.
12829 * Use the big-endian read routines to preserve
12830 * the byte order as it exists in NVRAM.
12831 */
12832 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12833 goto error;
12834 }
12835 } else {
12836 u8 *ptr;
12837 ssize_t cnt;
12838 unsigned int pos = 0;
12839
12840 ptr = (u8 *)&buf[0];
12841 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12842 cnt = pci_read_vpd(tp->pdev, pos,
12843 len - pos, ptr);
12844 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12845 cnt = 0;
12846 else if (cnt < 0)
12847 goto error;
12848 }
12849 if (pos != len)
12850 goto error;
12851 }
12852
12853 *vpdlen = len;
12854
12855 return buf;
12856
12857 error:
12858 kfree(buf);
12859 return NULL;
12860 }
12861
12862 #define NVRAM_TEST_SIZE 0x100
12863 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12864 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12865 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12866 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12867 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12868 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12869 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12870 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12871
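/* ethtool self-test helper: read back the NVRAM image and verify the
 * checksum appropriate to its format (legacy, selfboot firmware, or
 * selfboot hardware with parity bits), plus the VPD checksum keyword
 * when present.
 */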
12872 static int tg3_test_nvram(struct tg3 *tp)
12873 {
12874 u32 csum, magic, len;
12875 __be32 *buf;
12876 int i, j, k, err = 0, size;
12877
12878 if (tg3_flag(tp, NO_NVRAM))
12879 return 0;
12880
12881 if (tg3_nvram_read(tp, 0, &magic) != 0)
12882 return -EIO;
12883
12884 if (magic == TG3_EEPROM_MAGIC)
12885 size = NVRAM_TEST_SIZE;
12886 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12887 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12888 TG3_EEPROM_SB_FORMAT_1) {
12889 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12890 case TG3_EEPROM_SB_REVISION_0:
12891 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12892 break;
12893 case TG3_EEPROM_SB_REVISION_2:
12894 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12895 break;
12896 case TG3_EEPROM_SB_REVISION_3:
12897 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12898 break;
12899 case TG3_EEPROM_SB_REVISION_4:
12900 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12901 break;
12902 case TG3_EEPROM_SB_REVISION_5:
12903 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12904 break;
12905 case TG3_EEPROM_SB_REVISION_6:
12906 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12907 break;
12908 default:
12909 return -EIO;
12910 }
12911 } else
12912 return 0;
12913 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12914 size = NVRAM_SELFBOOT_HW_SIZE;
12915 else
12916 return -EIO;
12917
12918 buf = kmalloc(size, GFP_KERNEL);
12919 if (buf == NULL)
12920 return -ENOMEM;
12921
12922 err = -EIO;
12923 for (i = 0, j = 0; i < size; i += 4, j++) {
12924 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12925 if (err)
12926 break;
12927 }
12928 if (i < size)
12929 goto out;
12930
12931 /* Selfboot format */
12932 magic = be32_to_cpu(buf[0]);
12933 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12934 TG3_EEPROM_MAGIC_FW) {
12935 u8 *buf8 = (u8 *) buf, csum8 = 0;
12936
12937 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12938 TG3_EEPROM_SB_REVISION_2) {
12939 /* For rev 2, the csum doesn't include the MBA. */
12940 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12941 csum8 += buf8[i];
12942 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12943 csum8 += buf8[i];
12944 } else {
12945 for (i = 0; i < size; i++)
12946 csum8 += buf8[i];
12947 }
12948
12949 if (csum8 == 0) {
12950 err = 0;
12951 goto out;
12952 }
12953
12954 err = -EIO;
12955 goto out;
12956 }
12957
12958 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12959 TG3_EEPROM_MAGIC_HW) {
12960 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12961 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12962 u8 *buf8 = (u8 *) buf;
12963
12964 /* Separate the parity bits and the data bytes. */
12965 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12966 if ((i == 0) || (i == 8)) {
12967 int l;
12968 u8 msk;
12969
12970 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12971 parity[k++] = buf8[i] & msk;
12972 i++;
12973 } else if (i == 16) {
12974 int l;
12975 u8 msk;
12976
12977 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12978 parity[k++] = buf8[i] & msk;
12979 i++;
12980
12981 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12982 parity[k++] = buf8[i] & msk;
12983 i++;
12984 }
12985 data[j++] = buf8[i];
12986 }
12987
12988 err = -EIO;
12989 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12990 u8 hw8 = hweight8(data[i]);
12991
12992 if ((hw8 & 0x1) && parity[i])
12993 goto out;
12994 else if (!(hw8 & 0x1) && !parity[i])
12995 goto out;
12996 }
12997 err = 0;
12998 goto out;
12999 }
13000
13001 err = -EIO;
13002
13003 /* Bootstrap checksum at offset 0x10 */
13004 csum = calc_crc((unsigned char *) buf, 0x10);
13005 if (csum != le32_to_cpu(buf[0x10/4]))
13006 goto out;
13007
13008 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13009 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13010 if (csum != le32_to_cpu(buf[0xfc/4]))
13011 goto out;
13012
13013 kfree(buf);
13014
13015 buf = tg3_vpd_readblock(tp, &len);
13016 if (!buf)
13017 return -ENOMEM;
13018
13019 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13020 if (i > 0) {
13021 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13022 if (j < 0)
13023 goto out;
13024
13025 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13026 goto out;
13027
13028 i += PCI_VPD_LRDT_TAG_SIZE;
13029 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13030 PCI_VPD_RO_KEYWORD_CHKSUM);
13031 if (j > 0) {
13032 u8 csum8 = 0;
13033
13034 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13035
13036 for (i = 0; i <= j; i++)
13037 csum8 += ((u8 *)buf)[i];
13038
13039 if (csum8)
13040 goto out;
13041 }
13042 }
13043
13044 err = 0;
13045
13046 out:
13047 kfree(buf);
13048 return err;
13049 }
13050
13051 #define TG3_SERDES_TIMEOUT_SEC 2
13052 #define TG3_COPPER_TIMEOUT_SEC 6
13053
13054 static int tg3_test_link(struct tg3 *tp)
13055 {
13056 int i, max;
13057
13058 if (!netif_running(tp->dev))
13059 return -ENODEV;
13060
13061 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13062 max = TG3_SERDES_TIMEOUT_SEC;
13063 else
13064 max = TG3_COPPER_TIMEOUT_SEC;
13065
13066 for (i = 0; i < max; i++) {
13067 if (tp->link_up)
13068 return 0;
13069
13070 if (msleep_interruptible(1000))
13071 break;
13072 }
13073
13074 return -EIO;
13075 }
13076
13077 /* Only test the commonly used registers */
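/* Each table entry below pairs a read-only mask with a read/write
 * mask; the test writes all-zeros and then all-ones to the register
 * and checks that only the read/write bits change.
 */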
13078 static int tg3_test_registers(struct tg3 *tp)
13079 {
13080 int i, is_5705, is_5750;
13081 u32 offset, read_mask, write_mask, val, save_val, read_val;
13082 static struct {
13083 u16 offset;
13084 u16 flags;
13085 #define TG3_FL_5705 0x1
13086 #define TG3_FL_NOT_5705 0x2
13087 #define TG3_FL_NOT_5788 0x4
13088 #define TG3_FL_NOT_5750 0x8
13089 u32 read_mask;
13090 u32 write_mask;
13091 } reg_tbl[] = {
13092 /* MAC Control Registers */
13093 { MAC_MODE, TG3_FL_NOT_5705,
13094 0x00000000, 0x00ef6f8c },
13095 { MAC_MODE, TG3_FL_5705,
13096 0x00000000, 0x01ef6b8c },
13097 { MAC_STATUS, TG3_FL_NOT_5705,
13098 0x03800107, 0x00000000 },
13099 { MAC_STATUS, TG3_FL_5705,
13100 0x03800100, 0x00000000 },
13101 { MAC_ADDR_0_HIGH, 0x0000,
13102 0x00000000, 0x0000ffff },
13103 { MAC_ADDR_0_LOW, 0x0000,
13104 0x00000000, 0xffffffff },
13105 { MAC_RX_MTU_SIZE, 0x0000,
13106 0x00000000, 0x0000ffff },
13107 { MAC_TX_MODE, 0x0000,
13108 0x00000000, 0x00000070 },
13109 { MAC_TX_LENGTHS, 0x0000,
13110 0x00000000, 0x00003fff },
13111 { MAC_RX_MODE, TG3_FL_NOT_5705,
13112 0x00000000, 0x000007fc },
13113 { MAC_RX_MODE, TG3_FL_5705,
13114 0x00000000, 0x000007dc },
13115 { MAC_HASH_REG_0, 0x0000,
13116 0x00000000, 0xffffffff },
13117 { MAC_HASH_REG_1, 0x0000,
13118 0x00000000, 0xffffffff },
13119 { MAC_HASH_REG_2, 0x0000,
13120 0x00000000, 0xffffffff },
13121 { MAC_HASH_REG_3, 0x0000,
13122 0x00000000, 0xffffffff },
13123
13124 /* Receive Data and Receive BD Initiator Control Registers. */
13125 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13126 0x00000000, 0xffffffff },
13127 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13130 0x00000000, 0x00000003 },
13131 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { RCVDBDI_STD_BD+0, 0x0000,
13134 0x00000000, 0xffffffff },
13135 { RCVDBDI_STD_BD+4, 0x0000,
13136 0x00000000, 0xffffffff },
13137 { RCVDBDI_STD_BD+8, 0x0000,
13138 0x00000000, 0xffff0002 },
13139 { RCVDBDI_STD_BD+0xc, 0x0000,
13140 0x00000000, 0xffffffff },
13141
13142 /* Receive BD Initiator Control Registers. */
13143 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { RCVBDI_STD_THRESH, TG3_FL_5705,
13146 0x00000000, 0x000003ff },
13147 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149
13150 /* Host Coalescing Control Registers. */
13151 { HOSTCC_MODE, TG3_FL_NOT_5705,
13152 0x00000000, 0x00000004 },
13153 { HOSTCC_MODE, TG3_FL_5705,
13154 0x00000000, 0x000000f6 },
13155 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13156 0x00000000, 0xffffffff },
13157 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13158 0x00000000, 0x000003ff },
13159 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13160 0x00000000, 0xffffffff },
13161 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13162 0x00000000, 0x000003ff },
13163 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13164 0x00000000, 0xffffffff },
13165 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13166 0x00000000, 0x000000ff },
13167 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13168 0x00000000, 0xffffffff },
13169 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13170 0x00000000, 0x000000ff },
13171 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13172 0x00000000, 0xffffffff },
13173 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13174 0x00000000, 0xffffffff },
13175 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13176 0x00000000, 0xffffffff },
13177 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13178 0x00000000, 0x000000ff },
13179 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13180 0x00000000, 0xffffffff },
13181 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13182 0x00000000, 0x000000ff },
13183 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13184 0x00000000, 0xffffffff },
13185 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13186 0x00000000, 0xffffffff },
13187 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13188 0x00000000, 0xffffffff },
13189 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13190 0x00000000, 0xffffffff },
13191 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13192 0x00000000, 0xffffffff },
13193 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13194 0xffffffff, 0x00000000 },
13195 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13196 0xffffffff, 0x00000000 },
13197
13198 /* Buffer Manager Control Registers. */
13199 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13200 0x00000000, 0x007fff80 },
13201 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13202 0x00000000, 0x007fffff },
13203 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13204 0x00000000, 0x0000003f },
13205 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13206 0x00000000, 0x000001ff },
13207 { BUFMGR_MB_HIGH_WATER, 0x0000,
13208 0x00000000, 0x000001ff },
13209 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13210 0xffffffff, 0x00000000 },
13211 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13212 0xffffffff, 0x00000000 },
13213
13214 /* Mailbox Registers */
13215 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13216 0x00000000, 0x000001ff },
13217 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13218 0x00000000, 0x000001ff },
13219 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13220 0x00000000, 0x000007ff },
13221 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13222 0x00000000, 0x000001ff },
13223
13224 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13225 };
13226
13227 is_5705 = is_5750 = 0;
13228 if (tg3_flag(tp, 5705_PLUS)) {
13229 is_5705 = 1;
13230 if (tg3_flag(tp, 5750_PLUS))
13231 is_5750 = 1;
13232 }
13233
13234 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13235 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13236 continue;
13237
13238 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13239 continue;
13240
13241 if (tg3_flag(tp, IS_5788) &&
13242 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13243 continue;
13244
13245 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13246 continue;
13247
13248 offset = (u32) reg_tbl[i].offset;
13249 read_mask = reg_tbl[i].read_mask;
13250 write_mask = reg_tbl[i].write_mask;
13251
13252 /* Save the original register content */
13253 save_val = tr32(offset);
13254
13255 /* Determine the read-only value. */
13256 read_val = save_val & read_mask;
13257
13258 /* Write zero to the register, then make sure the read-only bits
13259 * are not changed and the read/write bits are all zeros.
13260 */
13261 tw32(offset, 0);
13262
13263 val = tr32(offset);
13264
13265 /* Test the read-only and read/write bits. */
13266 if (((val & read_mask) != read_val) || (val & write_mask))
13267 goto out;
13268
13269 /* Write ones to all the bits defined by read_mask and write_mask,
13270 * then make sure the read-only bits are not changed and the
13271 * read/write bits are all ones.
13272 */
13273 tw32(offset, read_mask | write_mask);
13274
13275 val = tr32(offset);
13276
13277 /* Test the read-only bits. */
13278 if ((val & read_mask) != read_val)
13279 goto out;
13280
13281 /* Test the read/write bits. */
13282 if ((val & write_mask) != write_mask)
13283 goto out;
13284
13285 tw32(offset, save_val);
13286 }
13287
13288 return 0;
13289
13290 out:
13291 if (netif_msg_hw(tp))
13292 netdev_err(tp->dev,
13293 "Register test failed at offset %x\n", offset);
13294 tw32(offset, save_val);
13295 return -EIO;
13296 }
13297
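/* Write each test pattern across the given memory window, one 32-bit
 * word at a time, and read it back to verify.
 */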
13298 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13299 {
13300 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13301 int i;
13302 u32 j;
13303
13304 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13305 for (j = 0; j < len; j += 4) {
13306 u32 val;
13307
13308 tg3_write_mem(tp, offset + j, test_pattern[i]);
13309 tg3_read_mem(tp, offset + j, &val);
13310 if (val != test_pattern[i])
13311 return -EIO;
13312 }
13313 }
13314 return 0;
13315 }
13316
13317 static int tg3_test_memory(struct tg3 *tp)
13318 {
13319 static struct mem_entry {
13320 u32 offset;
13321 u32 len;
13322 } mem_tbl_570x[] = {
13323 { 0x00000000, 0x00b50},
13324 { 0x00002000, 0x1c000},
13325 { 0xffffffff, 0x00000}
13326 }, mem_tbl_5705[] = {
13327 { 0x00000100, 0x0000c},
13328 { 0x00000200, 0x00008},
13329 { 0x00004000, 0x00800},
13330 { 0x00006000, 0x01000},
13331 { 0x00008000, 0x02000},
13332 { 0x00010000, 0x0e000},
13333 { 0xffffffff, 0x00000}
13334 }, mem_tbl_5755[] = {
13335 { 0x00000200, 0x00008},
13336 { 0x00004000, 0x00800},
13337 { 0x00006000, 0x00800},
13338 { 0x00008000, 0x02000},
13339 { 0x00010000, 0x0c000},
13340 { 0xffffffff, 0x00000}
13341 }, mem_tbl_5906[] = {
13342 { 0x00000200, 0x00008},
13343 { 0x00004000, 0x00400},
13344 { 0x00006000, 0x00400},
13345 { 0x00008000, 0x01000},
13346 { 0x00010000, 0x01000},
13347 { 0xffffffff, 0x00000}
13348 }, mem_tbl_5717[] = {
13349 { 0x00000200, 0x00008},
13350 { 0x00010000, 0x0a000},
13351 { 0x00020000, 0x13c00},
13352 { 0xffffffff, 0x00000}
13353 }, mem_tbl_57765[] = {
13354 { 0x00000200, 0x00008},
13355 { 0x00004000, 0x00800},
13356 { 0x00006000, 0x09800},
13357 { 0x00010000, 0x0a000},
13358 { 0xffffffff, 0x00000}
13359 };
13360 struct mem_entry *mem_tbl;
13361 int err = 0;
13362 int i;
13363
13364 if (tg3_flag(tp, 5717_PLUS))
13365 mem_tbl = mem_tbl_5717;
13366 else if (tg3_flag(tp, 57765_CLASS) ||
13367 tg3_asic_rev(tp) == ASIC_REV_5762)
13368 mem_tbl = mem_tbl_57765;
13369 else if (tg3_flag(tp, 5755_PLUS))
13370 mem_tbl = mem_tbl_5755;
13371 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13372 mem_tbl = mem_tbl_5906;
13373 else if (tg3_flag(tp, 5705_PLUS))
13374 mem_tbl = mem_tbl_5705;
13375 else
13376 mem_tbl = mem_tbl_570x;
13377
13378 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13379 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13380 if (err)
13381 break;
13382 }
13383
13384 return err;
13385 }
13386
13387 #define TG3_TSO_MSS 500
13388
13389 #define TG3_TSO_IP_HDR_LEN 20
13390 #define TG3_TSO_TCP_HDR_LEN 20
13391 #define TG3_TSO_TCP_OPT_LEN 12
13392
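/* Canned Ethernet type + IPv4 + TCP header used as the template for
 * TSO loopback frames: ethertype 0x0800, a 20-byte IP header (DF set,
 * TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2), a 20-byte TCP header
 * (data offset 8, ACK set), and a 12-byte timestamp option
 * (NOP, NOP, kind 8, length 10) carrying dummy values.
 */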
13393 static const u8 tg3_tso_header[] = {
13394 0x08, 0x00,
13395 0x45, 0x00, 0x00, 0x00,
13396 0x00, 0x00, 0x40, 0x00,
13397 0x40, 0x06, 0x00, 0x00,
13398 0x0a, 0x00, 0x00, 0x01,
13399 0x0a, 0x00, 0x00, 0x02,
13400 0x0d, 0x00, 0xe0, 0x00,
13401 0x00, 0x00, 0x01, 0x00,
13402 0x00, 0x00, 0x02, 0x00,
13403 0x80, 0x10, 0x10, 0x00,
13404 0x14, 0x09, 0x00, 0x00,
13405 0x01, 0x01, 0x08, 0x0a,
13406 0x11, 0x11, 0x11, 0x11,
13407 0x11, 0x11, 0x11, 0x11,
13408 };
13409
13410 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13411 {
13412 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13413 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13414 u32 budget;
13415 struct sk_buff *skb;
13416 u8 *tx_data, *rx_data;
13417 dma_addr_t map;
13418 int num_pkts, tx_len, rx_len, i, err;
13419 struct tg3_rx_buffer_desc *desc;
13420 struct tg3_napi *tnapi, *rnapi;
13421 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13422
13423 tnapi = &tp->napi[0];
13424 rnapi = &tp->napi[0];
13425 if (tp->irq_cnt > 1) {
13426 if (tg3_flag(tp, ENABLE_RSS))
13427 rnapi = &tp->napi[1];
13428 if (tg3_flag(tp, ENABLE_TSS))
13429 tnapi = &tp->napi[1];
13430 }
13431 coal_now = tnapi->coal_now | rnapi->coal_now;
13432
13433 err = -EIO;
13434
13435 tx_len = pktsz;
13436 skb = netdev_alloc_skb(tp->dev, tx_len);
13437 if (!skb)
13438 return -ENOMEM;
13439
13440 tx_data = skb_put(skb, tx_len);
13441 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13442 memset(tx_data + ETH_ALEN, 0x0, 8);
13443
13444 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13445
13446 if (tso_loopback) {
13447 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13448
13449 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13450 TG3_TSO_TCP_OPT_LEN;
13451
13452 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13453 sizeof(tg3_tso_header));
13454 mss = TG3_TSO_MSS;
13455
13456 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13457 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13458
13459 /* Set the total length field in the IP header */
13460 iph->tot_len = htons((u16)(mss + hdr_len));
13461
13462 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13463 TXD_FLAG_CPU_POST_DMA);
13464
13465 if (tg3_flag(tp, HW_TSO_1) ||
13466 tg3_flag(tp, HW_TSO_2) ||
13467 tg3_flag(tp, HW_TSO_3)) {
13468 struct tcphdr *th;
13469 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13470 th = (struct tcphdr *)&tx_data[val];
13471 th->check = 0;
13472 } else
13473 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13474
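/* Encode the LSO header length into the mss word and base_flags in
 * the layout each hardware TSO generation expects; this mirrors the
 * encoding used on the normal transmit path.
 */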
13475 if (tg3_flag(tp, HW_TSO_3)) {
13476 mss |= (hdr_len & 0xc) << 12;
13477 if (hdr_len & 0x10)
13478 base_flags |= 0x00000010;
13479 base_flags |= (hdr_len & 0x3e0) << 5;
13480 } else if (tg3_flag(tp, HW_TSO_2))
13481 mss |= hdr_len << 9;
13482 else if (tg3_flag(tp, HW_TSO_1) ||
13483 tg3_asic_rev(tp) == ASIC_REV_5705) {
13484 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13485 } else {
13486 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13487 }
13488
13489 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13490 } else {
13491 num_pkts = 1;
13492 data_off = ETH_HLEN;
13493
13494 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13495 tx_len > VLAN_ETH_FRAME_LEN)
13496 base_flags |= TXD_FLAG_JMB_PKT;
13497 }
13498
13499 for (i = data_off; i < tx_len; i++)
13500 tx_data[i] = (u8) (i & 0xff);
13501
13502 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13503 if (pci_dma_mapping_error(tp->pdev, map)) {
13504 dev_kfree_skb(skb);
13505 return -EIO;
13506 }
13507
13508 val = tnapi->tx_prod;
13509 tnapi->tx_buffers[val].skb = skb;
13510 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13511
13512 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13513 rnapi->coal_now);
13514
13515 udelay(10);
13516
13517 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13518
13519 budget = tg3_tx_avail(tnapi);
13520 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13521 base_flags | TXD_FLAG_END, mss, 0)) {
13522 tnapi->tx_buffers[val].skb = NULL;
13523 dev_kfree_skb(skb);
13524 return -EIO;
13525 }
13526
13527 tnapi->tx_prod++;
13528
13529 /* Sync BD data before updating mailbox */
13530 wmb();
13531
13532 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13533 tr32_mailbox(tnapi->prodmbox);
13534
13535 udelay(10);
13536
13537 /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
13538 for (i = 0; i < 35; i++) {
13539 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13540 coal_now);
13541
13542 udelay(10);
13543
13544 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13545 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13546 if ((tx_idx == tnapi->tx_prod) &&
13547 (rx_idx == (rx_start_idx + num_pkts)))
13548 break;
13549 }
13550
13551 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13552 dev_kfree_skb(skb);
13553
13554 if (tx_idx != tnapi->tx_prod)
13555 goto out;
13556
13557 if (rx_idx != rx_start_idx + num_pkts)
13558 goto out;
13559
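/* Walk every descriptor the loopback produced and verify the error
 * bits, frame length, ring placement (standard vs. jumbo), TSO
 * checksum, and payload pattern.
 */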
13560 val = data_off;
13561 while (rx_idx != rx_start_idx) {
13562 desc = &rnapi->rx_rcb[rx_start_idx++];
13563 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13564 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13565
13566 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13567 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13568 goto out;
13569
13570 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13571 - ETH_FCS_LEN;
13572
13573 if (!tso_loopback) {
13574 if (rx_len != tx_len)
13575 goto out;
13576
13577 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13578 if (opaque_key != RXD_OPAQUE_RING_STD)
13579 goto out;
13580 } else {
13581 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13582 goto out;
13583 }
13584 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13585 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13586 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13587 goto out;
13588 }
13589
13590 if (opaque_key == RXD_OPAQUE_RING_STD) {
13591 rx_data = tpr->rx_std_buffers[desc_idx].data;
13592 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13593 mapping);
13594 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13595 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13596 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13597 mapping);
13598 } else
13599 goto out;
13600
13601 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13602 PCI_DMA_FROMDEVICE);
13603
13604 rx_data += TG3_RX_OFFSET(tp);
13605 for (i = data_off; i < rx_len; i++, val++) {
13606 if (*(rx_data + i) != (u8) (val & 0xff))
13607 goto out;
13608 }
13609 }
13610
13611 err = 0;
13612
13613 /* tg3_free_rings will unmap and free the rx_data */
13614 out:
13615 return err;
13616 }
13617
13618 #define TG3_STD_LOOPBACK_FAILED 1
13619 #define TG3_JMB_LOOPBACK_FAILED 2
13620 #define TG3_TSO_LOOPBACK_FAILED 4
13621 #define TG3_LOOPBACK_FAILED \
13622 (TG3_STD_LOOPBACK_FAILED | \
13623 TG3_JMB_LOOPBACK_FAILED | \
13624 TG3_TSO_LOOPBACK_FAILED)
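/* Each data[] slot accumulates the failure bits above for one
 * loopback mode (MAC, PHY, or external).
 */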
13625
13626 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13627 {
13628 int err = -EIO;
13629 u32 eee_cap;
13630 u32 jmb_pkt_sz = 9000;
13631
13632 if (tp->dma_limit)
13633 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13634
13635 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13636 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13637
13638 if (!netif_running(tp->dev)) {
13639 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13640 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13641 if (do_extlpbk)
13642 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13643 goto done;
13644 }
13645
13646 err = tg3_reset_hw(tp, true);
13647 if (err) {
13648 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13650 if (do_extlpbk)
13651 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13652 goto done;
13653 }
13654
13655 if (tg3_flag(tp, ENABLE_RSS)) {
13656 int i;
13657
13658 /* Reroute all rx packets to the 1st queue */
13659 for (i = MAC_RSS_INDIR_TBL_0;
13660 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13661 tw32(i, 0x0);
13662 }
13663
13664 /* HW errata - MAC loopback fails in some cases on 5780.
13665 * Normal traffic and PHY loopback are not affected by the
13666 * errata. Also, the MAC loopback test is deprecated for
13667 * all newer ASIC revisions.
13668 */
13669 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13670 !tg3_flag(tp, CPMU_PRESENT)) {
13671 tg3_mac_loopback(tp, true);
13672
13673 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13674 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13675
13676 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13677 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13678 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13679
13680 tg3_mac_loopback(tp, false);
13681 }
13682
13683 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13684 !tg3_flag(tp, USE_PHYLIB)) {
13685 int i;
13686
13687 tg3_phy_lpbk_set(tp, 0, false);
13688
13689 /* Wait for link */
13690 for (i = 0; i < 100; i++) {
13691 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13692 break;
13693 mdelay(1);
13694 }
13695
13696 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13697 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13698 if (tg3_flag(tp, TSO_CAPABLE) &&
13699 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13700 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13701 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13702 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13703 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13704
13705 if (do_extlpbk) {
13706 tg3_phy_lpbk_set(tp, 0, true);
13707
13708 /* All link indications report up, but the hardware
13709 * isn't really ready for about 20 msec. Double it
13710 * to be sure.
13711 */
13712 mdelay(40);
13713
13714 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13715 data[TG3_EXT_LOOPB_TEST] |=
13716 TG3_STD_LOOPBACK_FAILED;
13717 if (tg3_flag(tp, TSO_CAPABLE) &&
13718 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13719 data[TG3_EXT_LOOPB_TEST] |=
13720 TG3_TSO_LOOPBACK_FAILED;
13721 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13722 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13723 data[TG3_EXT_LOOPB_TEST] |=
13724 TG3_JMB_LOOPBACK_FAILED;
13725 }
13726
13727 /* Re-enable gphy autopowerdown. */
13728 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13729 tg3_phy_toggle_apd(tp, true);
13730 }
13731
13732 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13733 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13734
13735 done:
13736 tp->phy_flags |= eee_cap;
13737
13738 return err;
13739 }
13740
13741 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13742 u64 *data)
13743 {
13744 struct tg3 *tp = netdev_priv(dev);
13745 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13746
13747 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13748 if (tg3_power_up(tp)) {
13749 etest->flags |= ETH_TEST_FL_FAILED;
13750 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13751 return;
13752 }
13753 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13754 }
13755
13756 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13757
13758 if (tg3_test_nvram(tp) != 0) {
13759 etest->flags |= ETH_TEST_FL_FAILED;
13760 data[TG3_NVRAM_TEST] = 1;
13761 }
13762 if (!doextlpbk && tg3_test_link(tp)) {
13763 etest->flags |= ETH_TEST_FL_FAILED;
13764 data[TG3_LINK_TEST] = 1;
13765 }
13766 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13767 int err, err2 = 0, irq_sync = 0;
13768
13769 if (netif_running(dev)) {
13770 tg3_phy_stop(tp);
13771 tg3_netif_stop(tp);
13772 irq_sync = 1;
13773 }
13774
13775 tg3_full_lock(tp, irq_sync);
13776 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13777 err = tg3_nvram_lock(tp);
13778 tg3_halt_cpu(tp, RX_CPU_BASE);
13779 if (!tg3_flag(tp, 5705_PLUS))
13780 tg3_halt_cpu(tp, TX_CPU_BASE);
13781 if (!err)
13782 tg3_nvram_unlock(tp);
13783
13784 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13785 tg3_phy_reset(tp);
13786
13787 if (tg3_test_registers(tp) != 0) {
13788 etest->flags |= ETH_TEST_FL_FAILED;
13789 data[TG3_REGISTER_TEST] = 1;
13790 }
13791
13792 if (tg3_test_memory(tp) != 0) {
13793 etest->flags |= ETH_TEST_FL_FAILED;
13794 data[TG3_MEMORY_TEST] = 1;
13795 }
13796
13797 if (doextlpbk)
13798 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13799
13800 if (tg3_test_loopback(tp, data, doextlpbk))
13801 etest->flags |= ETH_TEST_FL_FAILED;
13802
13803 tg3_full_unlock(tp);
13804
13805 if (tg3_test_interrupt(tp) != 0) {
13806 etest->flags |= ETH_TEST_FL_FAILED;
13807 data[TG3_INTERRUPT_TEST] = 1;
13808 }
13809
13810 tg3_full_lock(tp, 0);
13811
13812 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13813 if (netif_running(dev)) {
13814 tg3_flag_set(tp, INIT_COMPLETE);
13815 err2 = tg3_restart_hw(tp, true);
13816 if (!err2)
13817 tg3_netif_start(tp);
13818 }
13819
13820 tg3_full_unlock(tp);
13821
13822 if (irq_sync && !err2)
13823 tg3_phy_start(tp);
13824 }
13825 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13826 tg3_power_down_prepare(tp);
13827
13828 }
13829
13830 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13831 {
13832 struct tg3 *tp = netdev_priv(dev);
13833 struct hwtstamp_config stmpconf;
13834
13835 if (!tg3_flag(tp, PTP_CAPABLE))
13836 return -EOPNOTSUPP;
13837
13838 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13839 return -EFAULT;
13840
13841 if (stmpconf.flags)
13842 return -EINVAL;
13843
13844 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13845 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13846 return -ERANGE;
13847
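/* Translate the requested hwtstamp rx_filter into the chip's RX PTP
 * control bits; anything the hardware cannot timestamp is rejected
 * with -ERANGE.
 */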
13848 switch (stmpconf.rx_filter) {
13849 case HWTSTAMP_FILTER_NONE:
13850 tp->rxptpctl = 0;
13851 break;
13852 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13853 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13854 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13855 break;
13856 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13857 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13858 TG3_RX_PTP_CTL_SYNC_EVNT;
13859 break;
13860 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13861 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13862 TG3_RX_PTP_CTL_DELAY_REQ;
13863 break;
13864 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13865 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13866 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867 break;
13868 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13869 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13870 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13871 break;
13872 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13873 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13874 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13875 break;
13876 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13877 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13878 TG3_RX_PTP_CTL_SYNC_EVNT;
13879 break;
13880 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13881 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13882 TG3_RX_PTP_CTL_SYNC_EVNT;
13883 break;
13884 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13885 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13886 TG3_RX_PTP_CTL_SYNC_EVNT;
13887 break;
13888 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13889 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13890 TG3_RX_PTP_CTL_DELAY_REQ;
13891 break;
13892 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13893 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13894 TG3_RX_PTP_CTL_DELAY_REQ;
13895 break;
13896 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13897 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13898 TG3_RX_PTP_CTL_DELAY_REQ;
13899 break;
13900 default:
13901 return -ERANGE;
13902 }
13903
13904 if (netif_running(dev) && tp->rxptpctl)
13905 tw32(TG3_RX_PTP_CTL,
13906 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13907
13908 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13909 tg3_flag_set(tp, TX_TSTAMP_EN);
13910 else
13911 tg3_flag_clear(tp, TX_TSTAMP_EN);
13912
13913 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13914 -EFAULT : 0;
13915 }
13916
13917 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13918 {
13919 struct tg3 *tp = netdev_priv(dev);
13920 struct hwtstamp_config stmpconf;
13921
13922 if (!tg3_flag(tp, PTP_CAPABLE))
13923 return -EOPNOTSUPP;
13924
13925 stmpconf.flags = 0;
13926 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13927 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13928
13929 switch (tp->rxptpctl) {
13930 case 0:
13931 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13932 break;
13933 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13934 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13935 break;
13936 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13938 break;
13939 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13941 break;
13942 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13944 break;
13945 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13947 break;
13948 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13950 break;
13951 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13953 break;
13954 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13956 break;
13957 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13959 break;
13960 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13961 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13962 break;
13963 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13964 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13965 break;
13966 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13967 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13968 break;
13969 default:
13970 WARN_ON_ONCE(1);
13971 return -ERANGE;
13972 }
13973
13974 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13975 -EFAULT : 0;
13976 }
13977
13978 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13979 {
13980 struct mii_ioctl_data *data = if_mii(ifr);
13981 struct tg3 *tp = netdev_priv(dev);
13982 int err;
13983
13984 if (tg3_flag(tp, USE_PHYLIB)) {
13985 struct phy_device *phydev;
13986 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13987 return -EAGAIN;
13988 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13989 return phy_mii_ioctl(phydev, ifr, cmd);
13990 }
13991
13992 switch (cmd) {
13993 case SIOCGMIIPHY:
13994 data->phy_id = tp->phy_addr;
13995
13996 /* fallthru */
13997 case SIOCGMIIREG: {
13998 u32 mii_regval;
13999
14000 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14001 break; /* We have no PHY */
14002
14003 if (!netif_running(dev))
14004 return -EAGAIN;
14005
14006 spin_lock_bh(&tp->lock);
14007 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14008 data->reg_num & 0x1f, &mii_regval);
14009 spin_unlock_bh(&tp->lock);
14010
14011 data->val_out = mii_regval;
14012
14013 return err;
14014 }
14015
14016 case SIOCSMIIREG:
14017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14018 break; /* We have no PHY */
14019
14020 if (!netif_running(dev))
14021 return -EAGAIN;
14022
14023 spin_lock_bh(&tp->lock);
14024 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14025 data->reg_num & 0x1f, data->val_in);
14026 spin_unlock_bh(&tp->lock);
14027
14028 return err;
14029
14030 case SIOCSHWTSTAMP:
14031 return tg3_hwtstamp_set(dev, ifr);
14032
14033 case SIOCGHWTSTAMP:
14034 return tg3_hwtstamp_get(dev, ifr);
14035
14036 default:
14037 /* do nothing */
14038 break;
14039 }
14040 return -EOPNOTSUPP;
14041 }
14042
14043 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14044 {
14045 struct tg3 *tp = netdev_priv(dev);
14046
14047 memcpy(ec, &tp->coal, sizeof(*ec));
14048 return 0;
14049 }
14050
14051 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14052 {
14053 struct tg3 *tp = netdev_priv(dev);
14054 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14055 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14056
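/* Only pre-5705 chips support the IRQ-context coalescing knobs and
 * the statistics block ticks; on newer chips the limits stay zero,
 * so any nonzero request for them is rejected below.
 */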
14057 if (!tg3_flag(tp, 5705_PLUS)) {
14058 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14059 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14060 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14061 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14062 }
14063
14064 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14065 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14066 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14067 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14068 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14069 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14070 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14071 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14072 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14073 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14074 return -EINVAL;
14075
14076 /* No rx interrupts will be generated if both are zero */
14077 if ((ec->rx_coalesce_usecs == 0) &&
14078 (ec->rx_max_coalesced_frames == 0))
14079 return -EINVAL;
14080
14081 /* No tx interrupts will be generated if both are zero */
14082 if ((ec->tx_coalesce_usecs == 0) &&
14083 (ec->tx_max_coalesced_frames == 0))
14084 return -EINVAL;
14085
14086 /* Only copy relevant parameters, ignore all others. */
14087 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14088 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14089 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14090 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14091 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14092 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14093 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14094 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14095 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14096
14097 if (netif_running(dev)) {
14098 tg3_full_lock(tp, 0);
14099 __tg3_set_coalesce(tp, &tp->coal);
14100 tg3_full_unlock(tp);
14101 }
14102 return 0;
14103 }
14104
14105 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14106 {
14107 struct tg3 *tp = netdev_priv(dev);
14108
14109 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14110 netdev_warn(tp->dev, "Board does not support EEE!\n");
14111 return -EOPNOTSUPP;
14112 }
14113
14114 if (edata->advertised != tp->eee.advertised) {
14115 netdev_warn(tp->dev,
14116 "Direct manipulation of EEE advertisement is not supported\n");
14117 return -EINVAL;
14118 }
14119
14120 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14121 netdev_warn(tp->dev,
14122 "Maximal Tx Lpi timer supported is %#x(u)\n",
14123 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14124 return -EINVAL;
14125 }
14126
14127 tp->eee = *edata;
14128
14129 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14130 tg3_warn_mgmt_link_flap(tp);
14131
14132 if (netif_running(tp->dev)) {
14133 tg3_full_lock(tp, 0);
14134 tg3_setup_eee(tp);
14135 tg3_phy_reset(tp);
14136 tg3_full_unlock(tp);
14137 }
14138
14139 return 0;
14140 }
14141
14142 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14143 {
14144 struct tg3 *tp = netdev_priv(dev);
14145
14146 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14147 netdev_warn(tp->dev,
14148 "Board does not support EEE!\n");
14149 return -EOPNOTSUPP;
14150 }
14151
14152 *edata = tp->eee;
14153 return 0;
14154 }
14155
14156 static const struct ethtool_ops tg3_ethtool_ops = {
14157 .get_settings = tg3_get_settings,
14158 .set_settings = tg3_set_settings,
14159 .get_drvinfo = tg3_get_drvinfo,
14160 .get_regs_len = tg3_get_regs_len,
14161 .get_regs = tg3_get_regs,
14162 .get_wol = tg3_get_wol,
14163 .set_wol = tg3_set_wol,
14164 .get_msglevel = tg3_get_msglevel,
14165 .set_msglevel = tg3_set_msglevel,
14166 .nway_reset = tg3_nway_reset,
14167 .get_link = ethtool_op_get_link,
14168 .get_eeprom_len = tg3_get_eeprom_len,
14169 .get_eeprom = tg3_get_eeprom,
14170 .set_eeprom = tg3_set_eeprom,
14171 .get_ringparam = tg3_get_ringparam,
14172 .set_ringparam = tg3_set_ringparam,
14173 .get_pauseparam = tg3_get_pauseparam,
14174 .set_pauseparam = tg3_set_pauseparam,
14175 .self_test = tg3_self_test,
14176 .get_strings = tg3_get_strings,
14177 .set_phys_id = tg3_set_phys_id,
14178 .get_ethtool_stats = tg3_get_ethtool_stats,
14179 .get_coalesce = tg3_get_coalesce,
14180 .set_coalesce = tg3_set_coalesce,
14181 .get_sset_count = tg3_get_sset_count,
14182 .get_rxnfc = tg3_get_rxnfc,
14183 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14184 .get_rxfh = tg3_get_rxfh,
14185 .set_rxfh = tg3_set_rxfh,
14186 .get_channels = tg3_get_channels,
14187 .set_channels = tg3_set_channels,
14188 .get_ts_info = tg3_get_ts_info,
14189 .get_eee = tg3_get_eee,
14190 .set_eee = tg3_set_eee,
14191 };
14192
14193 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14194 struct rtnl_link_stats64 *stats)
14195 {
14196 struct tg3 *tp = netdev_priv(dev);
14197
14198 spin_lock_bh(&tp->lock);
14199 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14200 *stats = tp->net_stats_prev;
14201 spin_unlock_bh(&tp->lock);
14202 return stats;
14203 }
14204
14205 tg3_get_nstats(tp, stats);
14206 spin_unlock_bh(&tp->lock);
14207
14208 return stats;
14209 }
14210
14211 static void tg3_set_rx_mode(struct net_device *dev)
14212 {
14213 struct tg3 *tp = netdev_priv(dev);
14214
14215 if (!netif_running(dev))
14216 return;
14217
14218 tg3_full_lock(tp, 0);
14219 __tg3_set_rx_mode(dev);
14220 tg3_full_unlock(tp);
14221 }
14222
14223 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14224 int new_mtu)
14225 {
14226 dev->mtu = new_mtu;
14227
14228 if (new_mtu > ETH_DATA_LEN) {
14229 if (tg3_flag(tp, 5780_CLASS)) {
14230 netdev_update_features(dev);
14231 tg3_flag_clear(tp, TSO_CAPABLE);
14232 } else {
14233 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14234 }
14235 } else {
14236 if (tg3_flag(tp, 5780_CLASS)) {
14237 tg3_flag_set(tp, TSO_CAPABLE);
14238 netdev_update_features(dev);
14239 }
14240 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14241 }
14242 }
14243
14244 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14245 {
14246 struct tg3 *tp = netdev_priv(dev);
14247 int err;
14248 bool reset_phy = false;
14249
14250 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14251 return -EINVAL;
14252
14253 if (!netif_running(dev)) {
14254 /* We'll just catch it later when the
14255 * device is brought up.
14256 */
14257 tg3_set_mtu(dev, tp, new_mtu);
14258 return 0;
14259 }
14260
14261 tg3_phy_stop(tp);
14262
14263 tg3_netif_stop(tp);
14264
14265 tg3_set_mtu(dev, tp, new_mtu);
14266
14267 tg3_full_lock(tp, 1);
14268
14269 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14270
14271 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14272 * breaks all requests to 256 bytes.
14273 */
14274 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14275 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14276 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14277 tg3_asic_rev(tp) == ASIC_REV_5720)
14278 reset_phy = true;
14279
14280 err = tg3_restart_hw(tp, reset_phy);
14281
14282 if (!err)
14283 tg3_netif_start(tp);
14284
14285 tg3_full_unlock(tp);
14286
14287 if (!err)
14288 tg3_phy_start(tp);
14289
14290 return err;
14291 }
14292
14293 static const struct net_device_ops tg3_netdev_ops = {
14294 .ndo_open = tg3_open,
14295 .ndo_stop = tg3_close,
14296 .ndo_start_xmit = tg3_start_xmit,
14297 .ndo_get_stats64 = tg3_get_stats64,
14298 .ndo_validate_addr = eth_validate_addr,
14299 .ndo_set_rx_mode = tg3_set_rx_mode,
14300 .ndo_set_mac_address = tg3_set_mac_addr,
14301 .ndo_do_ioctl = tg3_ioctl,
14302 .ndo_tx_timeout = tg3_tx_timeout,
14303 .ndo_change_mtu = tg3_change_mtu,
14304 .ndo_fix_features = tg3_fix_features,
14305 .ndo_set_features = tg3_set_features,
14306 #ifdef CONFIG_NET_POLL_CONTROLLER
14307 .ndo_poll_controller = tg3_poll_controller,
14308 #endif
14309 };
14310
14311 static void tg3_get_eeprom_size(struct tg3 *tp)
14312 {
14313 u32 cursize, val, magic;
14314
14315 tp->nvram_size = EEPROM_CHIP_SIZE;
14316
14317 if (tg3_nvram_read(tp, 0, &magic) != 0)
14318 return;
14319
14320 if ((magic != TG3_EEPROM_MAGIC) &&
14321 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14322 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14323 return;
14324
14325 /*
14326 * Size the chip by reading offsets at increasing powers of two.
14327 * When we encounter our validation signature, we know the addressing
14328 * has wrapped around, and thus have our chip size.
14329 */
14330 cursize = 0x10;
14331
14332 while (cursize < tp->nvram_size) {
14333 if (tg3_nvram_read(tp, cursize, &val) != 0)
14334 return;
14335
14336 if (val == magic)
14337 break;
14338
14339 cursize <<= 1;
14340 }
14341
14342 tp->nvram_size = cursize;
14343 }
14344
14345 static void tg3_get_nvram_size(struct tg3 *tp)
14346 {
14347 u32 val;
14348
14349 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14350 return;
14351
14352 /* Selfboot format */
14353 if (val != TG3_EEPROM_MAGIC) {
14354 tg3_get_eeprom_size(tp);
14355 return;
14356 }
14357
14358 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14359 if (val != 0) {
14360 /* This is confusing. We want to operate on the
14361 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14362 * call will read from NVRAM and byteswap the data
14363 * according to the byteswapping settings for all
14364 * other register accesses. This ensures the data we
14365 * want will always reside in the lower 16-bits.
14366 * However, the data in NVRAM is in LE format, which
14367 * means the data from the NVRAM read will always be
14368 * opposite the endianness of the CPU. The 16-bit
14369 * byteswap then brings the data to CPU endianness.
14370 */
14371 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14372 return;
14373 }
14374 }
14375 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14376 }
14377
14378 static void tg3_get_nvram_info(struct tg3 *tp)
14379 {
14380 u32 nvcfg1;
14381
14382 nvcfg1 = tr32(NVRAM_CFG1);
14383 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14384 tg3_flag_set(tp, FLASH);
14385 } else {
14386 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14387 tw32(NVRAM_CFG1, nvcfg1);
14388 }
14389
14390 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14391 tg3_flag(tp, 5780_CLASS)) {
14392 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14393 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14394 tp->nvram_jedecnum = JEDEC_ATMEL;
14395 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14396 tg3_flag_set(tp, NVRAM_BUFFERED);
14397 break;
14398 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14399 tp->nvram_jedecnum = JEDEC_ATMEL;
14400 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14401 break;
14402 case FLASH_VENDOR_ATMEL_EEPROM:
14403 tp->nvram_jedecnum = JEDEC_ATMEL;
14404 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14405 tg3_flag_set(tp, NVRAM_BUFFERED);
14406 break;
14407 case FLASH_VENDOR_ST:
14408 tp->nvram_jedecnum = JEDEC_ST;
14409 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14410 tg3_flag_set(tp, NVRAM_BUFFERED);
14411 break;
14412 case FLASH_VENDOR_SAIFUN:
14413 tp->nvram_jedecnum = JEDEC_SAIFUN;
14414 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14415 break;
14416 case FLASH_VENDOR_SST_SMALL:
14417 case FLASH_VENDOR_SST_LARGE:
14418 tp->nvram_jedecnum = JEDEC_SST;
14419 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14420 break;
14421 }
14422 } else {
14423 tp->nvram_jedecnum = JEDEC_ATMEL;
14424 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14425 tg3_flag_set(tp, NVRAM_BUFFERED);
14426 }
14427 }
14428
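/* Decode the strapped page-size field in NVRAM_CFG1 into a byte count. */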
14429 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14430 {
14431 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14432 case FLASH_5752PAGE_SIZE_256:
14433 tp->nvram_pagesize = 256;
14434 break;
14435 case FLASH_5752PAGE_SIZE_512:
14436 tp->nvram_pagesize = 512;
14437 break;
14438 case FLASH_5752PAGE_SIZE_1K:
14439 tp->nvram_pagesize = 1024;
14440 break;
14441 case FLASH_5752PAGE_SIZE_2K:
14442 tp->nvram_pagesize = 2048;
14443 break;
14444 case FLASH_5752PAGE_SIZE_4K:
14445 tp->nvram_pagesize = 4096;
14446 break;
14447 case FLASH_5752PAGE_SIZE_264:
14448 tp->nvram_pagesize = 264;
14449 break;
14450 case FLASH_5752PAGE_SIZE_528:
14451 tp->nvram_pagesize = 528;
14452 break;
14453 }
14454 }
14455
14456 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14457 {
14458 u32 nvcfg1;
14459
14460 nvcfg1 = tr32(NVRAM_CFG1);
14461
14462 /* NVRAM protection for TPM */
14463 if (nvcfg1 & (1 << 27))
14464 tg3_flag_set(tp, PROTECTED_NVRAM);
14465
14466 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14467 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14468 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14469 tp->nvram_jedecnum = JEDEC_ATMEL;
14470 tg3_flag_set(tp, NVRAM_BUFFERED);
14471 break;
14472 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14473 tp->nvram_jedecnum = JEDEC_ATMEL;
14474 tg3_flag_set(tp, NVRAM_BUFFERED);
14475 tg3_flag_set(tp, FLASH);
14476 break;
14477 case FLASH_5752VENDOR_ST_M45PE10:
14478 case FLASH_5752VENDOR_ST_M45PE20:
14479 case FLASH_5752VENDOR_ST_M45PE40:
14480 tp->nvram_jedecnum = JEDEC_ST;
14481 tg3_flag_set(tp, NVRAM_BUFFERED);
14482 tg3_flag_set(tp, FLASH);
14483 break;
14484 }
14485
14486 if (tg3_flag(tp, FLASH)) {
14487 tg3_nvram_get_pagesize(tp, nvcfg1);
14488 } else {
14489 /* For eeprom, set pagesize to maximum eeprom size */
14490 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14491
14492 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14493 tw32(NVRAM_CFG1, nvcfg1);
14494 }
14495 }
14496
14497 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14498 {
14499 u32 nvcfg1, protect = 0;
14500
14501 nvcfg1 = tr32(NVRAM_CFG1);
14502
14503 /* NVRAM protection for TPM */
14504 if (nvcfg1 & (1 << 27)) {
14505 tg3_flag_set(tp, PROTECTED_NVRAM);
14506 protect = 1;
14507 }
14508
14509 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14510 switch (nvcfg1) {
14511 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14512 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14513 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14514 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14515 tp->nvram_jedecnum = JEDEC_ATMEL;
14516 tg3_flag_set(tp, NVRAM_BUFFERED);
14517 tg3_flag_set(tp, FLASH);
14518 tp->nvram_pagesize = 264;
14519 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14520 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14521 tp->nvram_size = (protect ? 0x3e200 :
14522 TG3_NVRAM_SIZE_512KB);
14523 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14524 tp->nvram_size = (protect ? 0x1f200 :
14525 TG3_NVRAM_SIZE_256KB);
14526 else
14527 tp->nvram_size = (protect ? 0x1f200 :
14528 TG3_NVRAM_SIZE_128KB);
14529 break;
14530 case FLASH_5752VENDOR_ST_M45PE10:
14531 case FLASH_5752VENDOR_ST_M45PE20:
14532 case FLASH_5752VENDOR_ST_M45PE40:
14533 tp->nvram_jedecnum = JEDEC_ST;
14534 tg3_flag_set(tp, NVRAM_BUFFERED);
14535 tg3_flag_set(tp, FLASH);
14536 tp->nvram_pagesize = 256;
14537 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14538 tp->nvram_size = (protect ?
14539 TG3_NVRAM_SIZE_64KB :
14540 TG3_NVRAM_SIZE_128KB);
14541 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14542 tp->nvram_size = (protect ?
14543 TG3_NVRAM_SIZE_64KB :
14544 TG3_NVRAM_SIZE_256KB);
14545 else
14546 tp->nvram_size = (protect ?
14547 TG3_NVRAM_SIZE_128KB :
14548 TG3_NVRAM_SIZE_512KB);
14549 break;
14550 }
14551 }
14552
14553 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14554 {
14555 u32 nvcfg1;
14556
14557 nvcfg1 = tr32(NVRAM_CFG1);
14558
14559 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14560 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14561 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14562 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14563 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14564 tp->nvram_jedecnum = JEDEC_ATMEL;
14565 tg3_flag_set(tp, NVRAM_BUFFERED);
14566 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14567
14568 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14569 tw32(NVRAM_CFG1, nvcfg1);
14570 break;
14571 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14572 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14573 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14574 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14575 tp->nvram_jedecnum = JEDEC_ATMEL;
14576 tg3_flag_set(tp, NVRAM_BUFFERED);
14577 tg3_flag_set(tp, FLASH);
14578 tp->nvram_pagesize = 264;
14579 break;
14580 case FLASH_5752VENDOR_ST_M45PE10:
14581 case FLASH_5752VENDOR_ST_M45PE20:
14582 case FLASH_5752VENDOR_ST_M45PE40:
14583 tp->nvram_jedecnum = JEDEC_ST;
14584 tg3_flag_set(tp, NVRAM_BUFFERED);
14585 tg3_flag_set(tp, FLASH);
14586 tp->nvram_pagesize = 256;
14587 break;
14588 }
14589 }
14590
14591 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14592 {
14593 u32 nvcfg1, protect = 0;
14594
14595 nvcfg1 = tr32(NVRAM_CFG1);
14596
14597 /* NVRAM protection for TPM */
14598 if (nvcfg1 & (1 << 27)) {
14599 tg3_flag_set(tp, PROTECTED_NVRAM);
14600 protect = 1;
14601 }
14602
14603 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14604 switch (nvcfg1) {
14605 case FLASH_5761VENDOR_ATMEL_ADB021D:
14606 case FLASH_5761VENDOR_ATMEL_ADB041D:
14607 case FLASH_5761VENDOR_ATMEL_ADB081D:
14608 case FLASH_5761VENDOR_ATMEL_ADB161D:
14609 case FLASH_5761VENDOR_ATMEL_MDB021D:
14610 case FLASH_5761VENDOR_ATMEL_MDB041D:
14611 case FLASH_5761VENDOR_ATMEL_MDB081D:
14612 case FLASH_5761VENDOR_ATMEL_MDB161D:
14613 tp->nvram_jedecnum = JEDEC_ATMEL;
14614 tg3_flag_set(tp, NVRAM_BUFFERED);
14615 tg3_flag_set(tp, FLASH);
14616 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14617 tp->nvram_pagesize = 256;
14618 break;
14619 case FLASH_5761VENDOR_ST_A_M45PE20:
14620 case FLASH_5761VENDOR_ST_A_M45PE40:
14621 case FLASH_5761VENDOR_ST_A_M45PE80:
14622 case FLASH_5761VENDOR_ST_A_M45PE16:
14623 case FLASH_5761VENDOR_ST_M_M45PE20:
14624 case FLASH_5761VENDOR_ST_M_M45PE40:
14625 case FLASH_5761VENDOR_ST_M_M45PE80:
14626 case FLASH_5761VENDOR_ST_M_M45PE16:
14627 tp->nvram_jedecnum = JEDEC_ST;
14628 tg3_flag_set(tp, NVRAM_BUFFERED);
14629 tg3_flag_set(tp, FLASH);
14630 tp->nvram_pagesize = 256;
14631 break;
14632 }
14633
14634 if (protect) {
14635 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14636 } else {
14637 switch (nvcfg1) {
14638 case FLASH_5761VENDOR_ATMEL_ADB161D:
14639 case FLASH_5761VENDOR_ATMEL_MDB161D:
14640 case FLASH_5761VENDOR_ST_A_M45PE16:
14641 case FLASH_5761VENDOR_ST_M_M45PE16:
14642 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14643 break;
14644 case FLASH_5761VENDOR_ATMEL_ADB081D:
14645 case FLASH_5761VENDOR_ATMEL_MDB081D:
14646 case FLASH_5761VENDOR_ST_A_M45PE80:
14647 case FLASH_5761VENDOR_ST_M_M45PE80:
14648 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14649 break;
14650 case FLASH_5761VENDOR_ATMEL_ADB041D:
14651 case FLASH_5761VENDOR_ATMEL_MDB041D:
14652 case FLASH_5761VENDOR_ST_A_M45PE40:
14653 case FLASH_5761VENDOR_ST_M_M45PE40:
14654 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14655 break;
14656 case FLASH_5761VENDOR_ATMEL_ADB021D:
14657 case FLASH_5761VENDOR_ATMEL_MDB021D:
14658 case FLASH_5761VENDOR_ST_A_M45PE20:
14659 case FLASH_5761VENDOR_ST_M_M45PE20:
14660 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14661 break;
14662 }
14663 }
14664 }
14665
14666 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14667 {
14668 tp->nvram_jedecnum = JEDEC_ATMEL;
14669 tg3_flag_set(tp, NVRAM_BUFFERED);
14670 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14671 }
14672
14673 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14674 {
14675 u32 nvcfg1;
14676
14677 nvcfg1 = tr32(NVRAM_CFG1);
14678
14679 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14680 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14681 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14682 tp->nvram_jedecnum = JEDEC_ATMEL;
14683 tg3_flag_set(tp, NVRAM_BUFFERED);
14684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14685
14686 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14687 tw32(NVRAM_CFG1, nvcfg1);
14688 return;
14689 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14690 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14691 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14692 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14693 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14694 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14695 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14696 tp->nvram_jedecnum = JEDEC_ATMEL;
14697 tg3_flag_set(tp, NVRAM_BUFFERED);
14698 tg3_flag_set(tp, FLASH);
14699
14700 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14701 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14702 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14703 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14704 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14705 break;
14706 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14707 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14708 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14709 break;
14710 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14711 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14712 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14713 break;
14714 }
14715 break;
14716 case FLASH_5752VENDOR_ST_M45PE10:
14717 case FLASH_5752VENDOR_ST_M45PE20:
14718 case FLASH_5752VENDOR_ST_M45PE40:
14719 tp->nvram_jedecnum = JEDEC_ST;
14720 tg3_flag_set(tp, NVRAM_BUFFERED);
14721 tg3_flag_set(tp, FLASH);
14722
14723 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14724 case FLASH_5752VENDOR_ST_M45PE10:
14725 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14726 break;
14727 case FLASH_5752VENDOR_ST_M45PE20:
14728 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14729 break;
14730 case FLASH_5752VENDOR_ST_M45PE40:
14731 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14732 break;
14733 }
14734 break;
14735 default:
14736 tg3_flag_set(tp, NO_NVRAM);
14737 return;
14738 }
14739
14740 tg3_nvram_get_pagesize(tp, nvcfg1);
14741 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14742 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14743 }
14744
14746 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14747 {
14748 u32 nvcfg1;
14749
14750 nvcfg1 = tr32(NVRAM_CFG1);
14751
14752 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14753 case FLASH_5717VENDOR_ATMEL_EEPROM:
14754 case FLASH_5717VENDOR_MICRO_EEPROM:
14755 tp->nvram_jedecnum = JEDEC_ATMEL;
14756 tg3_flag_set(tp, NVRAM_BUFFERED);
14757 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14758
14759 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14760 tw32(NVRAM_CFG1, nvcfg1);
14761 return;
14762 case FLASH_5717VENDOR_ATMEL_MDB011D:
14763 case FLASH_5717VENDOR_ATMEL_ADB011B:
14764 case FLASH_5717VENDOR_ATMEL_ADB011D:
14765 case FLASH_5717VENDOR_ATMEL_MDB021D:
14766 case FLASH_5717VENDOR_ATMEL_ADB021B:
14767 case FLASH_5717VENDOR_ATMEL_ADB021D:
14768 case FLASH_5717VENDOR_ATMEL_45USPT:
14769 tp->nvram_jedecnum = JEDEC_ATMEL;
14770 tg3_flag_set(tp, NVRAM_BUFFERED);
14771 tg3_flag_set(tp, FLASH);
14772
14773 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14774 case FLASH_5717VENDOR_ATMEL_MDB021D:
14775 /* Detect size with tg3_get_nvram_size() */
14776 break;
14777 case FLASH_5717VENDOR_ATMEL_ADB021B:
14778 case FLASH_5717VENDOR_ATMEL_ADB021D:
14779 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14780 break;
14781 default:
14782 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14783 break;
14784 }
14785 break;
14786 case FLASH_5717VENDOR_ST_M_M25PE10:
14787 case FLASH_5717VENDOR_ST_A_M25PE10:
14788 case FLASH_5717VENDOR_ST_M_M45PE10:
14789 case FLASH_5717VENDOR_ST_A_M45PE10:
14790 case FLASH_5717VENDOR_ST_M_M25PE20:
14791 case FLASH_5717VENDOR_ST_A_M25PE20:
14792 case FLASH_5717VENDOR_ST_M_M45PE20:
14793 case FLASH_5717VENDOR_ST_A_M45PE20:
14794 case FLASH_5717VENDOR_ST_25USPT:
14795 case FLASH_5717VENDOR_ST_45USPT:
14796 tp->nvram_jedecnum = JEDEC_ST;
14797 tg3_flag_set(tp, NVRAM_BUFFERED);
14798 tg3_flag_set(tp, FLASH);
14799
14800 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14801 case FLASH_5717VENDOR_ST_M_M25PE20:
14802 case FLASH_5717VENDOR_ST_M_M45PE20:
14803 /* Detect size with tg3_get_nvram_size() */
14804 break;
14805 case FLASH_5717VENDOR_ST_A_M25PE20:
14806 case FLASH_5717VENDOR_ST_A_M45PE20:
14807 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14808 break;
14809 default:
14810 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14811 break;
14812 }
14813 break;
14814 default:
14815 tg3_flag_set(tp, NO_NVRAM);
14816 return;
14817 }
14818
14819 tg3_nvram_get_pagesize(tp, nvcfg1);
14820 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14821 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14822 }
14823
14824 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14825 {
14826 u32 nvcfg1, nvmpinstrp;
14827
14828 nvcfg1 = tr32(NVRAM_CFG1);
14829 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14830
14831 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14832 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14833 tg3_flag_set(tp, NO_NVRAM);
14834 return;
14835 }
14836
14837 switch (nvmpinstrp) {
14838 case FLASH_5762_EEPROM_HD:
14839 nvmpinstrp = FLASH_5720_EEPROM_HD;
14840 break;
14841 case FLASH_5762_EEPROM_LD:
14842 nvmpinstrp = FLASH_5720_EEPROM_LD;
14843 break;
14844 case FLASH_5720VENDOR_M_ST_M45PE20:
14845 /* This pinstrap supports multiple sizes, so force it
14846 * to read the actual size from location 0xf0.
14847 */
14848 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14849 break;
14850 }
14851 }
14852
14853 switch (nvmpinstrp) {
14854 case FLASH_5720_EEPROM_HD:
14855 case FLASH_5720_EEPROM_LD:
14856 tp->nvram_jedecnum = JEDEC_ATMEL;
14857 tg3_flag_set(tp, NVRAM_BUFFERED);
14858
14859 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14860 tw32(NVRAM_CFG1, nvcfg1);
14861 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14862 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14863 else
14864 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14865 return;
14866 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14867 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14868 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14869 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14870 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14871 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14872 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14873 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14874 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14875 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14876 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14877 case FLASH_5720VENDOR_ATMEL_45USPT:
14878 tp->nvram_jedecnum = JEDEC_ATMEL;
14879 tg3_flag_set(tp, NVRAM_BUFFERED);
14880 tg3_flag_set(tp, FLASH);
14881
14882 switch (nvmpinstrp) {
14883 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14884 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14885 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14886 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14887 break;
14888 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14889 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14890 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14891 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14892 break;
14893 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14894 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14895 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14896 break;
14897 default:
14898 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14899 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14900 break;
14901 }
14902 break;
14903 case FLASH_5720VENDOR_M_ST_M25PE10:
14904 case FLASH_5720VENDOR_M_ST_M45PE10:
14905 case FLASH_5720VENDOR_A_ST_M25PE10:
14906 case FLASH_5720VENDOR_A_ST_M45PE10:
14907 case FLASH_5720VENDOR_M_ST_M25PE20:
14908 case FLASH_5720VENDOR_M_ST_M45PE20:
14909 case FLASH_5720VENDOR_A_ST_M25PE20:
14910 case FLASH_5720VENDOR_A_ST_M45PE20:
14911 case FLASH_5720VENDOR_M_ST_M25PE40:
14912 case FLASH_5720VENDOR_M_ST_M45PE40:
14913 case FLASH_5720VENDOR_A_ST_M25PE40:
14914 case FLASH_5720VENDOR_A_ST_M45PE40:
14915 case FLASH_5720VENDOR_M_ST_M25PE80:
14916 case FLASH_5720VENDOR_M_ST_M45PE80:
14917 case FLASH_5720VENDOR_A_ST_M25PE80:
14918 case FLASH_5720VENDOR_A_ST_M45PE80:
14919 case FLASH_5720VENDOR_ST_25USPT:
14920 case FLASH_5720VENDOR_ST_45USPT:
14921 tp->nvram_jedecnum = JEDEC_ST;
14922 tg3_flag_set(tp, NVRAM_BUFFERED);
14923 tg3_flag_set(tp, FLASH);
14924
14925 switch (nvmpinstrp) {
14926 case FLASH_5720VENDOR_M_ST_M25PE20:
14927 case FLASH_5720VENDOR_M_ST_M45PE20:
14928 case FLASH_5720VENDOR_A_ST_M25PE20:
14929 case FLASH_5720VENDOR_A_ST_M45PE20:
14930 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14931 break;
14932 case FLASH_5720VENDOR_M_ST_M25PE40:
14933 case FLASH_5720VENDOR_M_ST_M45PE40:
14934 case FLASH_5720VENDOR_A_ST_M25PE40:
14935 case FLASH_5720VENDOR_A_ST_M45PE40:
14936 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14937 break;
14938 case FLASH_5720VENDOR_M_ST_M25PE80:
14939 case FLASH_5720VENDOR_M_ST_M45PE80:
14940 case FLASH_5720VENDOR_A_ST_M25PE80:
14941 case FLASH_5720VENDOR_A_ST_M45PE80:
14942 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14943 break;
14944 default:
14945 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14946 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14947 break;
14948 }
14949 break;
14950 default:
14951 tg3_flag_set(tp, NO_NVRAM);
14952 return;
14953 }
14954
14955 tg3_nvram_get_pagesize(tp, nvcfg1);
14956 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14957 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14958
14959 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14960 u32 val;
14961
14962 if (tg3_nvram_read(tp, 0, &val))
14963 return;
14964
14965 if (val != TG3_EEPROM_MAGIC &&
14966 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14967 tg3_flag_set(tp, NO_NVRAM);
14968 }
14969 }
14970
14971 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14972 static void tg3_nvram_init(struct tg3 *tp)
14973 {
14974 if (tg3_flag(tp, IS_SSB_CORE)) {
14975 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14976 tg3_flag_clear(tp, NVRAM);
14977 tg3_flag_clear(tp, NVRAM_BUFFERED);
14978 tg3_flag_set(tp, NO_NVRAM);
14979 return;
14980 }
14981
14982 tw32_f(GRC_EEPROM_ADDR,
14983 (EEPROM_ADDR_FSM_RESET |
14984 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14985 EEPROM_ADDR_CLKPERD_SHIFT)));
14986
14987 msleep(1);
14988
14989 /* Enable serial EEPROM accesses. */
14990 tw32_f(GRC_LOCAL_CTRL,
14991 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14992 udelay(100);
14993
14994 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14995 tg3_asic_rev(tp) != ASIC_REV_5701) {
14996 tg3_flag_set(tp, NVRAM);
14997
14998 if (tg3_nvram_lock(tp)) {
14999 netdev_warn(tp->dev,
15000 "Cannot get nvram lock, %s failed\n",
15001 __func__);
15002 return;
15003 }
15004 tg3_enable_nvram_access(tp);
15005
15006 tp->nvram_size = 0;
15007
15008 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15009 tg3_get_5752_nvram_info(tp);
15010 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15011 tg3_get_5755_nvram_info(tp);
15012 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15013 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15014 tg3_asic_rev(tp) == ASIC_REV_5785)
15015 tg3_get_5787_nvram_info(tp);
15016 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15017 tg3_get_5761_nvram_info(tp);
15018 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15019 tg3_get_5906_nvram_info(tp);
15020 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15021 tg3_flag(tp, 57765_CLASS))
15022 tg3_get_57780_nvram_info(tp);
15023 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15024 tg3_asic_rev(tp) == ASIC_REV_5719)
15025 tg3_get_5717_nvram_info(tp);
15026 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15027 tg3_asic_rev(tp) == ASIC_REV_5762)
15028 tg3_get_5720_nvram_info(tp);
15029 else
15030 tg3_get_nvram_info(tp);
15031
15032 if (tp->nvram_size == 0)
15033 tg3_get_nvram_size(tp);
15034
15035 tg3_disable_nvram_access(tp);
15036 tg3_nvram_unlock(tp);
15037
15038 } else {
15039 tg3_flag_clear(tp, NVRAM);
15040 tg3_flag_clear(tp, NVRAM_BUFFERED);
15041
15042 tg3_get_eeprom_size(tp);
15043 }
15044 }
15045
15046 struct subsys_tbl_ent {
15047 u16 subsys_vendor, subsys_devid;
15048 u32 phy_id;
15049 };
15050
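/* Map known OEM subsystem IDs to the PHY fitted on those boards.
 * A phy_id of 0 appears to mark fiber/SerDes boards with no copper
 * PHY (e.g. the 3C996SX).
 */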
15051 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15052 /* Broadcom boards. */
15053 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15054 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15055 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15056 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15057 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15058 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15059 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15060 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15061 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15062 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15063 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15064 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15065 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15066 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15067 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15068 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15069 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15070 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15071 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15072 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15073 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15074 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15075
15076 /* 3com boards. */
15077 { TG3PCI_SUBVENDOR_ID_3COM,
15078 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15079 { TG3PCI_SUBVENDOR_ID_3COM,
15080 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15081 { TG3PCI_SUBVENDOR_ID_3COM,
15082 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15083 { TG3PCI_SUBVENDOR_ID_3COM,
15084 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15085 { TG3PCI_SUBVENDOR_ID_3COM,
15086 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15087
15088 /* DELL boards. */
15089 { TG3PCI_SUBVENDOR_ID_DELL,
15090 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15091 { TG3PCI_SUBVENDOR_ID_DELL,
15092 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15093 { TG3PCI_SUBVENDOR_ID_DELL,
15094 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15095 { TG3PCI_SUBVENDOR_ID_DELL,
15096 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15097
15098 /* Compaq boards. */
15099 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15100 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15101 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15102 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15103 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15104 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15105 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15106 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15107 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15108 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15109
15110 /* IBM boards. */
15111 { TG3PCI_SUBVENDOR_ID_IBM,
15112 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15113 };
15114
15115 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15116 {
15117 int i;
15118
15119 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15120 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15121 tp->pdev->subsystem_vendor) &&
15122 (subsys_id_to_phy_id[i].subsys_devid ==
15123 tp->pdev->subsystem_device))
15124 return &subsys_id_to_phy_id[i];
15125 }
15126 return NULL;
15127 }
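
/* Hypothetical usage sketch: resolving a PHY ID from the subsystem
 * table when nothing better is available.  tg3_phy_probe() below
 * performs this same lookup as one of its fallbacks.
 */
static inline u32 tg3_subsys_phy_id_example(struct tg3 *tp)
{
	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);

	return p ? p->phy_id : TG3_PHY_ID_INVALID;
}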
15128
15129 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15130 {
15131 u32 val;
15132
15133 tp->phy_id = TG3_PHY_ID_INVALID;
15134 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15135
15136 /* Assume an onboard device and WOL capable by default. */
15137 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15138 tg3_flag_set(tp, WOL_CAP);
15139
15140 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15141 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15142 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15143 tg3_flag_set(tp, IS_NIC);
15144 }
15145 val = tr32(VCPU_CFGSHDW);
15146 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15147 tg3_flag_set(tp, ASPM_WORKAROUND);
15148 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15149 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15150 tg3_flag_set(tp, WOL_ENABLE);
15151 device_set_wakeup_enable(&tp->pdev->dev, true);
15152 }
15153 goto done;
15154 }
15155
15156 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15157 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15158 u32 nic_cfg, led_cfg;
15159 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15160 u32 nic_phy_id, ver, eeprom_phy_id;
15161 int eeprom_phy_serdes = 0;
15162
15163 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15164 tp->nic_sram_data_cfg = nic_cfg;
15165
15166 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15167 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15168 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15169 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15170 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15171 (ver > 0) && (ver < 0x100))
15172 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15173
15174 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15175 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15176
15177 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15178 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15179 tg3_asic_rev(tp) == ASIC_REV_5720)
15180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15181
15182 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15183 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15184 eeprom_phy_serdes = 1;
15185
15186 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15187 if (nic_phy_id != 0) {
15188 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15189 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15190
15191 eeprom_phy_id = (id1 >> 16) << 10;
15192 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15193 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15194 } else
15195 eeprom_phy_id = 0;
15196
15197 tp->phy_id = eeprom_phy_id;
15198 if (eeprom_phy_serdes) {
15199 if (!tg3_flag(tp, 5705_PLUS))
15200 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15201 else
15202 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15203 }
15204
15205 if (tg3_flag(tp, 5750_PLUS))
15206 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15207 SHASTA_EXT_LED_MODE_MASK);
15208 else
15209 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15210
15211 switch (led_cfg) {
15212 default:
15213 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15214 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15215 break;
15216
15217 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15218 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15219 break;
15220
15221 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15222 tp->led_ctrl = LED_CTRL_MODE_MAC;
15223
15224 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15225 * read on some older 5700/5701 bootcode.
15226 */
15227 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15228 tg3_asic_rev(tp) == ASIC_REV_5701)
15229 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15230
15231 break;
15232
15233 case SHASTA_EXT_LED_SHARED:
15234 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15235 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15236 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15237 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15238 LED_CTRL_MODE_PHY_2);
15239
15240 if (tg3_flag(tp, 5717_PLUS) ||
15241 tg3_asic_rev(tp) == ASIC_REV_5762)
15242 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15243 LED_CTRL_BLINK_RATE_MASK;
15244
15245 break;
15246
15247 case SHASTA_EXT_LED_MAC:
15248 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15249 break;
15250
15251 case SHASTA_EXT_LED_COMBO:
15252 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15253 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15254 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15255 LED_CTRL_MODE_PHY_2);
15256 break;
15257
15258 }
15259
15260 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15261 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15262 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15263 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15264
15265 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15266 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15267
15268 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15269 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15270 if ((tp->pdev->subsystem_vendor ==
15271 PCI_VENDOR_ID_ARIMA) &&
15272 (tp->pdev->subsystem_device == 0x205a ||
15273 tp->pdev->subsystem_device == 0x2063))
15274 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15275 } else {
15276 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15277 tg3_flag_set(tp, IS_NIC);
15278 }
15279
15280 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15281 tg3_flag_set(tp, ENABLE_ASF);
15282 if (tg3_flag(tp, 5750_PLUS))
15283 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15284 }
15285
15286 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15287 tg3_flag(tp, 5750_PLUS))
15288 tg3_flag_set(tp, ENABLE_APE);
15289
15290 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15291 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15292 tg3_flag_clear(tp, WOL_CAP);
15293
15294 if (tg3_flag(tp, WOL_CAP) &&
15295 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15296 tg3_flag_set(tp, WOL_ENABLE);
15297 device_set_wakeup_enable(&tp->pdev->dev, true);
15298 }
15299
15300 if (cfg2 & (1 << 17))
15301 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15302
15303 /* serdes signal pre-emphasis in register 0x590 is set by the */
15304 /* bootcode if bit 18 is set. */
15305 if (cfg2 & (1 << 18))
15306 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15307
15308 if ((tg3_flag(tp, 57765_PLUS) ||
15309 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15310 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15311 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15312 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15313
15314 if (tg3_flag(tp, PCI_EXPRESS)) {
15315 u32 cfg3;
15316
15317 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15318 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15319 !tg3_flag(tp, 57765_PLUS) &&
15320 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15321 tg3_flag_set(tp, ASPM_WORKAROUND);
15322 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15323 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15324 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15325 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15326 }
15327
15328 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15329 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15330 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15331 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15332 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15333 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15334
15335 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15336 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15337 }
15338 done:
15339 if (tg3_flag(tp, WOL_CAP))
15340 device_set_wakeup_enable(&tp->pdev->dev,
15341 tg3_flag(tp, WOL_ENABLE));
15342 else
15343 device_set_wakeup_capable(&tp->pdev->dev, false);
15344 }
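
/* Illustrative helper, not used by the driver: the packing that turns
 * the two 16-bit PHY ID words into the driver's 32-bit phy_id.  The
 * same shifts appear above (with ID1 taken from the top half of the
 * NIC SRAM word) and below in tg3_phy_probe() for the live
 * MII_PHYSID1/MII_PHYSID2 registers.
 */
static inline u32 tg3_pack_phy_id_example(u32 id1, u32 id2)
{
	u32 phy_id = (id1 & 0xffff) << 10;	/* OUI, upper bits */

	phy_id |= (id2 & 0xfc00) << 16;		/* OUI, lower bits */
	phy_id |= (id2 & 0x03ff) << 0;		/* model and revision */

	return phy_id;
}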
15345
15346 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15347 {
15348 int i, err;
15349 u32 val2, off = offset * 8;
15350
15351 err = tg3_nvram_lock(tp);
15352 if (err)
15353 return err;
15354
15355 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15356 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15357 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15358 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15359 udelay(10);
15360
15361 for (i = 0; i < 100; i++) {
15362 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15363 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15364 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15365 break;
15366 }
15367 udelay(10);
15368 }
15369
15370 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15371
15372 tg3_nvram_unlock(tp);
15373 if (val2 & APE_OTP_STATUS_CMD_DONE)
15374 return 0;
15375
15376 return -EBUSY;
15377 }
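
/* The poll loop above allows up to 100 iterations of 10 us each, so
 * an OTP read is given roughly 1 ms (after the initial 10 us settle
 * delay) to report APE_OTP_STATUS_CMD_DONE before -EBUSY is returned.
 */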
15378
15379 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15380 {
15381 int i;
15382 u32 val;
15383
15384 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15385 tw32(OTP_CTRL, cmd);
15386
15387 /* Wait for up to 1 ms for command to execute. */
15388 for (i = 0; i < 100; i++) {
15389 val = tr32(OTP_STATUS);
15390 if (val & OTP_STATUS_CMD_DONE)
15391 break;
15392 udelay(10);
15393 }
15394
15395 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15396 }
15397
15398 /* Read the gphy configuration from the OTP region of the chip. The gphy
15399 * configuration is a 32-bit value that straddles the alignment boundary.
15400 * We do two 32-bit reads and then shift and merge the results.
15401 */
15402 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15403 {
15404 u32 bhalf_otp, thalf_otp;
15405
15406 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15407
15408 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15409 return 0;
15410
15411 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15412
15413 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15414 return 0;
15415
15416 thalf_otp = tr32(OTP_READ_DATA);
15417
15418 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15419
15420 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15421 return 0;
15422
15423 bhalf_otp = tr32(OTP_READ_DATA);
15424
15425 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15426 }
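
/* Worked example of the merge above: if the MAGIC1 (top-half) read
 * returns 0x0000abcd and the MAGIC2 (bottom-half) read returns
 * 0x1234ffff, the reconstructed gphy config is
 *
 *	((0x0000abcd & 0x0000ffff) << 16) | (0x1234ffff >> 16)
 *		= 0xabcd0000 | 0x00001234 = 0xabcd1234
 *
 * i.e. the low 16 bits of the first word become the high half and the
 * high 16 bits of the second word become the low half.
 */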
15427
15428 static void tg3_phy_init_link_config(struct tg3 *tp)
15429 {
15430 u32 adv = ADVERTISED_Autoneg;
15431
15432 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15433 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15434 adv |= ADVERTISED_1000baseT_Half;
15435 adv |= ADVERTISED_1000baseT_Full;
15436 }
15437
15438 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15439 adv |= ADVERTISED_100baseT_Half |
15440 ADVERTISED_100baseT_Full |
15441 ADVERTISED_10baseT_Half |
15442 ADVERTISED_10baseT_Full |
15443 ADVERTISED_TP;
15444 else
15445 adv |= ADVERTISED_FIBRE;
15446
15447 tp->link_config.advertising = adv;
15448 tp->link_config.speed = SPEED_UNKNOWN;
15449 tp->link_config.duplex = DUPLEX_UNKNOWN;
15450 tp->link_config.autoneg = AUTONEG_ENABLE;
15451 tp->link_config.active_speed = SPEED_UNKNOWN;
15452 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15453
15454 tp->old_link = -1;
15455 }
15456
15457 static int tg3_phy_probe(struct tg3 *tp)
15458 {
15459 u32 hw_phy_id_1, hw_phy_id_2;
15460 u32 hw_phy_id, hw_phy_id_masked;
15461 int err;
15462
15463 /* flow control autonegotiation is default behavior */
15464 tg3_flag_set(tp, PAUSE_AUTONEG);
15465 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15466
15467 if (tg3_flag(tp, ENABLE_APE)) {
15468 switch (tp->pci_fn) {
15469 case 0:
15470 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15471 break;
15472 case 1:
15473 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15474 break;
15475 case 2:
15476 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15477 break;
15478 case 3:
15479 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15480 break;
15481 }
15482 }
15483
15484 if (!tg3_flag(tp, ENABLE_ASF) &&
15485 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15486 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15487 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15488 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15489
15490 if (tg3_flag(tp, USE_PHYLIB))
15491 return tg3_phy_init(tp);
15492
15493 /* Reading the PHY ID register can conflict with ASF
15494 * firmware access to the PHY hardware.
15495 */
15496 err = 0;
15497 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15498 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15499 } else {
15500 /* Now read the physical PHY_ID from the chip and verify
15501 * that it is sane. If it doesn't look good, we fall back
15502 * first to the PHY_ID found in the eeprom area and, failing
15503 * that, to the hard-coded subsystem-ID table.
15504 */
15505 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15506 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15507
15508 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15509 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15510 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15511
15512 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15513 }
15514
15515 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15516 tp->phy_id = hw_phy_id;
15517 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15518 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15519 else
15520 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15521 } else {
15522 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15523 /* Do nothing, phy ID already set up in
15524 * tg3_get_eeprom_hw_cfg().
15525 */
15526 } else {
15527 struct subsys_tbl_ent *p;
15528
15529 /* No eeprom signature? Try the hardcoded
15530 * subsys device table.
15531 */
15532 p = tg3_lookup_by_subsys(tp);
15533 if (p) {
15534 tp->phy_id = p->phy_id;
15535 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15536 /* So far we have seen the IDs 0xbc050cd0,
15537 * 0xbc050f80 and 0xbc050c30 on devices
15538 * connected to a BCM4785, and there are
15539 * probably more. For now, just assume that
15540 * the phy is supported when it is connected
15541 * to an SSB core.
15542 */
15543 return -ENODEV;
15544 }
15545
15546 if (!tp->phy_id ||
15547 tp->phy_id == TG3_PHY_ID_BCM8002)
15548 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15549 }
15550 }
15551
15552 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15553 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15554 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15555 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15556 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15557 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15558 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15559 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15560 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15561 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15562
15563 tp->eee.supported = SUPPORTED_100baseT_Full |
15564 SUPPORTED_1000baseT_Full;
15565 tp->eee.advertised = ADVERTISED_100baseT_Full |
15566 ADVERTISED_1000baseT_Full;
15567 tp->eee.eee_enabled = 1;
15568 tp->eee.tx_lpi_enabled = 1;
15569 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15570 }
15571
15572 tg3_phy_init_link_config(tp);
15573
15574 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15575 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15576 !tg3_flag(tp, ENABLE_APE) &&
15577 !tg3_flag(tp, ENABLE_ASF)) {
15578 u32 bmsr, dummy;
15579
15580 tg3_readphy(tp, MII_BMSR, &bmsr);
15581 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15582 (bmsr & BMSR_LSTATUS))
15583 goto skip_phy_reset;
15584
15585 err = tg3_phy_reset(tp);
15586 if (err)
15587 return err;
15588
15589 tg3_phy_set_wirespeed(tp);
15590
15591 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15592 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15593 tp->link_config.flowctrl);
15594
15595 tg3_writephy(tp, MII_BMCR,
15596 BMCR_ANENABLE | BMCR_ANRESTART);
15597 }
15598 }
15599
15600 skip_phy_reset:
15601 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15602 err = tg3_init_5401phy_dsp(tp);
15603 if (err)
15604 return err;
15605
15606 err = tg3_init_5401phy_dsp(tp);
15607 }
15608
15609 return err;
15610 }
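
/* PHY identification in tg3_phy_probe() resolves in this order:
 *
 *   1. the MII PHYSID1/PHYSID2 registers, unless ASF/APE firmware
 *	owns the PHY (in which case the hardware read is skipped);
 *   2. the ID cached from NIC SRAM by tg3_get_eeprom_hw_cfg();
 *   3. the hardcoded subsys_id_to_phy_id[] table;
 *   4. otherwise the probe fails with -ENODEV, except on SSB cores,
 *	where an unknown PHY is assumed to be supported.
 */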
15611
15612 static void tg3_read_vpd(struct tg3 *tp)
15613 {
15614 u8 *vpd_data;
15615 unsigned int block_end, rosize, len;
15616 u32 vpdlen;
15617 int j, i = 0;
15618
15619 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15620 if (!vpd_data)
15621 goto out_no_vpd;
15622
15623 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15624 if (i < 0)
15625 goto out_not_found;
15626
15627 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15628 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15629 i += PCI_VPD_LRDT_TAG_SIZE;
15630
15631 if (block_end > vpdlen)
15632 goto out_not_found;
15633
15634 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15635 PCI_VPD_RO_KEYWORD_MFR_ID);
15636 if (j > 0) {
15637 len = pci_vpd_info_field_size(&vpd_data[j]);
15638
15639 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15640 if (j + len > block_end || len != 4 ||
15641 memcmp(&vpd_data[j], "1028", 4))
15642 goto partno;
15643
15644 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15645 PCI_VPD_RO_KEYWORD_VENDOR0);
15646 if (j < 0)
15647 goto partno;
15648
15649 len = pci_vpd_info_field_size(&vpd_data[j]);
15650
15651 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15652 if (j + len > block_end)
15653 goto partno;
15654
15655 if (len >= sizeof(tp->fw_ver))
15656 len = sizeof(tp->fw_ver) - 1;
15657 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15658 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15659 &vpd_data[j]);
15660 }
15661
15662 partno:
15663 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15664 PCI_VPD_RO_KEYWORD_PARTNO);
15665 if (i < 0)
15666 goto out_not_found;
15667
15668 len = pci_vpd_info_field_size(&vpd_data[i]);
15669
15670 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15671 if (len > TG3_BPN_SIZE ||
15672 (len + i) > vpdlen)
15673 goto out_not_found;
15674
15675 memcpy(tp->board_part_number, &vpd_data[i], len);
15676
15677 out_not_found:
15678 kfree(vpd_data);
15679 if (tp->board_part_number[0])
15680 return;
15681
15682 out_no_vpd:
15683 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15684 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15685 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15686 strcpy(tp->board_part_number, "BCM5717");
15687 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15688 strcpy(tp->board_part_number, "BCM5718");
15689 else
15690 goto nomatch;
15691 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15692 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15693 strcpy(tp->board_part_number, "BCM57780");
15694 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15695 strcpy(tp->board_part_number, "BCM57760");
15696 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15697 strcpy(tp->board_part_number, "BCM57790");
15698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15699 strcpy(tp->board_part_number, "BCM57788");
15700 else
15701 goto nomatch;
15702 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15703 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15704 strcpy(tp->board_part_number, "BCM57761");
15705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15706 strcpy(tp->board_part_number, "BCM57765");
15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15708 strcpy(tp->board_part_number, "BCM57781");
15709 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15710 strcpy(tp->board_part_number, "BCM57785");
15711 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15712 strcpy(tp->board_part_number, "BCM57791");
15713 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15714 strcpy(tp->board_part_number, "BCM57795");
15715 else
15716 goto nomatch;
15717 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15718 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15719 strcpy(tp->board_part_number, "BCM57762");
15720 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15721 strcpy(tp->board_part_number, "BCM57766");
15722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15723 strcpy(tp->board_part_number, "BCM57782");
15724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15725 strcpy(tp->board_part_number, "BCM57786");
15726 else
15727 goto nomatch;
15728 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15729 strcpy(tp->board_part_number, "BCM95906");
15730 } else {
15731 nomatch:
15732 strcpy(tp->board_part_number, "none");
15733 }
15734 }
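
/* Illustrative sketch using the same era-specific PCI VPD helpers as
 * tg3_read_vpd() above: find one read-only keyword in a VPD image and
 * return the offset of its value bytes (or a negative value on
 * failure), with the field length stored in *fld_len.  The function
 * name and factoring are hypothetical.
 */
static inline int tg3_vpd_find_ro_kw_example(const u8 *buf,
					     unsigned int len,
					     const char *kw,
					     unsigned int *fld_len)
{
	unsigned int rosize;
	int i, j;

	i = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return i;

	rosize = pci_vpd_lrdt_size(&buf[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;

	j = pci_vpd_find_info_keyword(buf, i, rosize, kw);
	if (j < 0)
		return j;

	*fld_len = pci_vpd_info_field_size(&buf[j]);
	return j + PCI_VPD_INFO_FLD_HDR_SIZE;
}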
15735
15736 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15737 {
15738 u32 val;
15739
15740 if (tg3_nvram_read(tp, offset, &val) ||
15741 (val & 0xfc000000) != 0x0c000000 ||
15742 tg3_nvram_read(tp, offset + 4, &val) ||
15743 val != 0)
15744 return 0;
15745
15746 return 1;
15747 }
15748
15749 static void tg3_read_bc_ver(struct tg3 *tp)
15750 {
15751 u32 val, offset, start, ver_offset;
15752 int i, dst_off;
15753 bool newver = false;
15754
15755 if (tg3_nvram_read(tp, 0xc, &offset) ||
15756 tg3_nvram_read(tp, 0x4, &start))
15757 return;
15758
15759 offset = tg3_nvram_logical_addr(tp, offset);
15760
15761 if (tg3_nvram_read(tp, offset, &val))
15762 return;
15763
15764 if ((val & 0xfc000000) == 0x0c000000) {
15765 if (tg3_nvram_read(tp, offset + 4, &val))
15766 return;
15767
15768 if (val == 0)
15769 newver = true;
15770 }
15771
15772 dst_off = strlen(tp->fw_ver);
15773
15774 if (newver) {
15775 if (TG3_VER_SIZE - dst_off < 16 ||
15776 tg3_nvram_read(tp, offset + 8, &ver_offset))
15777 return;
15778
15779 offset = offset + ver_offset - start;
15780 for (i = 0; i < 16; i += 4) {
15781 __be32 v;
15782 if (tg3_nvram_read_be32(tp, offset + i, &v))
15783 return;
15784
15785 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15786 }
15787 } else {
15788 u32 major, minor;
15789
15790 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15791 return;
15792
15793 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15794 TG3_NVM_BCVER_MAJSFT;
15795 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15796 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15797 "v%d.%02d", major, minor);
15798 }
15799 }
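
/* Illustrative results: on the legacy path a bootcode with major 3
 * and minor 62 in the TG3_NVM_PTREV_BCVER word is rendered as
 * "v3.62"; on the newver path the 16-byte version string is instead
 * copied verbatim out of the image.  Either form lands after whatever
 * tg3_read_vpd() already left in tp->fw_ver.
 */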
15800
15801 static void tg3_read_hwsb_ver(struct tg3 *tp)
15802 {
15803 u32 val, major, minor;
15804
15805 /* Use native endian representation */
15806 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15807 return;
15808
15809 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15810 TG3_NVM_HWSB_CFG1_MAJSFT;
15811 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15812 TG3_NVM_HWSB_CFG1_MINSFT;
15813
15814 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15815 }
15816
15817 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15818 {
15819 u32 offset, major, minor, build;
15820
15821 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15822
15823 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15824 return;
15825
15826 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15827 case TG3_EEPROM_SB_REVISION_0:
15828 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15829 break;
15830 case TG3_EEPROM_SB_REVISION_2:
15831 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15832 break;
15833 case TG3_EEPROM_SB_REVISION_3:
15834 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15835 break;
15836 case TG3_EEPROM_SB_REVISION_4:
15837 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15838 break;
15839 case TG3_EEPROM_SB_REVISION_5:
15840 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15841 break;
15842 case TG3_EEPROM_SB_REVISION_6:
15843 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15844 break;
15845 default:
15846 return;
15847 }
15848
15849 if (tg3_nvram_read(tp, offset, &val))
15850 return;
15851
15852 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15853 TG3_EEPROM_SB_EDH_BLD_SHFT;
15854 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15855 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15856 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15857
15858 if (minor > 99 || build > 26)
15859 return;
15860
15861 offset = strlen(tp->fw_ver);
15862 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15863 " v%d.%02d", major, minor);
15864
15865 if (build > 0) {
15866 offset = strlen(tp->fw_ver);
15867 if (offset < TG3_VER_SIZE - 1)
15868 tp->fw_ver[offset] = 'a' + build - 1;
15869 }
15870 }
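
/* Worked example of the EDH decode above: a selfboot image whose
 * EDH word carries major 1, minor 2 and build 1 first gets "sb"
 * appended, then " v1.02", and finally the build letter 'a'
 * ('a' + build - 1), yielding "sb v1.02a" when fw_ver started empty.
 */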
15871
15872 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15873 {
15874 u32 val, offset, start;
15875 int i, vlen;
15876
15877 for (offset = TG3_NVM_DIR_START;
15878 offset < TG3_NVM_DIR_END;
15879 offset += TG3_NVM_DIRENT_SIZE) {
15880 if (tg3_nvram_read(tp, offset, &val))
15881 return;
15882
15883 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15884 break;
15885 }
15886
15887 if (offset == TG3_NVM_DIR_END)
15888 return;
15889
15890 if (!tg3_flag(tp, 5705_PLUS))
15891 start = 0x08000000;
15892 else if (tg3_nvram_read(tp, offset - 4, &start))
15893 return;
15894
15895 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15896 !tg3_fw_img_is_valid(tp, offset) ||
15897 tg3_nvram_read(tp, offset + 8, &val))
15898 return;
15899
15900 offset += val - start;
15901
15902 vlen = strlen(tp->fw_ver);
15903
15904 tp->fw_ver[vlen++] = ',';
15905 tp->fw_ver[vlen++] = ' ';
15906
15907 for (i = 0; i < 4; i++) {
15908 __be32 v;
15909 if (tg3_nvram_read_be32(tp, offset, &v))
15910 return;
15911
15912 offset += sizeof(v);
15913
15914 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15915 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15916 break;
15917 }
15918
15919 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15920 vlen += sizeof(v);
15921 }
15922 }
15923
15924 static void tg3_probe_ncsi(struct tg3 *tp)
15925 {
15926 u32 apedata;
15927
15928 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15929 if (apedata != APE_SEG_SIG_MAGIC)
15930 return;
15931
15932 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15933 if (!(apedata & APE_FW_STATUS_READY))
15934 return;
15935
15936 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15937 tg3_flag_set(tp, APE_HAS_NCSI);
15938 }
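
/* Illustrative helper, not used by the driver: the two-step APE
 * handshake performed above.  The shared-memory segment must carry
 * the signature magic and the firmware must report itself ready
 * before any other APE state is trusted.
 */
static inline bool tg3_ape_fw_ready_example(struct tg3 *tp)
{
	if (tg3_ape_read32(tp, TG3_APE_SEG_SIG) != APE_SEG_SIG_MAGIC)
		return false;

	return !!(tg3_ape_read32(tp, TG3_APE_FW_STATUS) &
		  APE_FW_STATUS_READY);
}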
15939
15940 static void tg3_read_dash_ver(struct tg3 *tp)
15941 {
15942 int vlen;
15943 u32 apedata;
15944 char *fwtype;
15945
15946 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15947
15948 if (tg3_flag(tp, APE_HAS_NCSI))
15949 fwtype = "NCSI";
15950 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15951 fwtype = "SMASH";
15952 else
15953 fwtype = "DASH";
15954
15955 vlen = strlen(tp->fw_ver);
15956
15957 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15958 fwtype,
15959 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15960 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15961 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15962 (apedata & APE_FW_VERSION_BLDMSK));
15963 }
15964
15965 static void tg3_read_otp_ver(struct tg3 *tp)
15966 {
15967 u32 val, val2;
15968
15969 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15970 return;
15971
15972 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15973 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15974 TG3_OTP_MAGIC0_VALID(val)) {
15975 u64 val64 = (u64) val << 32 | val2;
15976 u32 ver = 0;
15977 int i, vlen;
15978
15979 for (i = 0; i < 7; i++) {
15980 if ((val64 & 0xff) == 0)
15981 break;
15982 ver = val64 & 0xff;
15983 val64 >>= 8;
15984 }
15985 vlen = strlen(tp->fw_ver);
15986 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15987 }
15988 }
15989
15990 static void tg3_read_fw_ver(struct tg3 *tp)
15991 {
15992 u32 val;
15993 bool vpd_vers = false;
15994
15995 if (tp->fw_ver[0] != 0)
15996 vpd_vers = true;
15997
15998 if (tg3_flag(tp, NO_NVRAM)) {
15999 strcat(tp->fw_ver, "sb");
16000 tg3_read_otp_ver(tp);
16001 return;
16002 }
16003
16004 if (tg3_nvram_read(tp, 0, &val))
16005 return;
16006
16007 if (val == TG3_EEPROM_MAGIC)
16008 tg3_read_bc_ver(tp);
16009 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16010 tg3_read_sb_ver(tp, val);
16011 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16012 tg3_read_hwsb_ver(tp);
16013
16014 if (tg3_flag(tp, ENABLE_ASF)) {
16015 if (tg3_flag(tp, ENABLE_APE)) {
16016 tg3_probe_ncsi(tp);
16017 if (!vpd_vers)
16018 tg3_read_dash_ver(tp);
16019 } else if (!vpd_vers) {
16020 tg3_read_mgmtfw_ver(tp);
16021 }
16022 }
16023
16024 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16025 }
16026
16027 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16028 {
16029 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16030 return TG3_RX_RET_MAX_SIZE_5717;
16031 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16032 return TG3_RX_RET_MAX_SIZE_5700;
16033 else
16034 return TG3_RX_RET_MAX_SIZE_5705;
16035 }
16036
16037 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16038 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16039 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16040 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16041 { },
16042 };
16043
16044 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16045 {
16046 struct pci_dev *peer;
16047 unsigned int func, devnr = tp->pdev->devfn & ~7;
16048
16049 for (func = 0; func < 8; func++) {
16050 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16051 if (peer && peer != tp->pdev)
16052 break;
16053 pci_dev_put(peer);
16054 }
16055 /* 5704 can be configured in single-port mode; set peer to
16056 * tp->pdev in that case.
16057 */
16058 if (!peer) {
16059 peer = tp->pdev;
16060 return peer;
16061 }
16062
16063 /*
16064 * We don't need to keep the refcount elevated; there's no way
16065 * to remove one half of this device without removing the other.
16066 */
16067 pci_dev_put(peer);
16068
16069 return peer;
16070 }
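
/* Note on the devfn arithmetic above: a PCI devfn packs a 5-bit slot
 * number and a 3-bit function number into one byte, so masking with
 * ~7 keeps the slot and clears the function.  With the standard
 * accessors the same value reads as:
 *
 *	devnr == PCI_DEVFN(PCI_SLOT(tp->pdev->devfn), 0)
 */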
16071
16072 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16073 {
16074 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16075 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16076 u32 reg;
16077
16078 /* All devices that use the alternate
16079 * ASIC REV location have a CPMU.
16080 */
16081 tg3_flag_set(tp, CPMU_PRESENT);
16082
16083 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16094 reg = TG3PCI_GEN2_PRODID_ASICREV;
16095 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16104 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16105 reg = TG3PCI_GEN15_PRODID_ASICREV;
16106 else
16107 reg = TG3PCI_PRODID_ASICREV;
16108
16109 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16110 }
16111
16112 /* Wrong chip ID in 5752 A0. This code can be removed later
16113 * as A0 is not in production.
16114 */
16115 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16116 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16117
16118 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16119 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16120
16121 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16122 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16123 tg3_asic_rev(tp) == ASIC_REV_5720)
16124 tg3_flag_set(tp, 5717_PLUS);
16125
16126 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16127 tg3_asic_rev(tp) == ASIC_REV_57766)
16128 tg3_flag_set(tp, 57765_CLASS);
16129
16130 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16131 tg3_asic_rev(tp) == ASIC_REV_5762)
16132 tg3_flag_set(tp, 57765_PLUS);
16133
16134 /* Intentionally exclude ASIC_REV_5906 */
16135 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16136 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16137 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16138 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16139 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16140 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16141 tg3_flag(tp, 57765_PLUS))
16142 tg3_flag_set(tp, 5755_PLUS);
16143
16144 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16145 tg3_asic_rev(tp) == ASIC_REV_5714)
16146 tg3_flag_set(tp, 5780_CLASS);
16147
16148 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16149 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16150 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16151 tg3_flag(tp, 5755_PLUS) ||
16152 tg3_flag(tp, 5780_CLASS))
16153 tg3_flag_set(tp, 5750_PLUS);
16154
16155 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16156 tg3_flag(tp, 5750_PLUS))
16157 tg3_flag_set(tp, 5705_PLUS);
16158 }
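
/* For reference, the identifiers tested above all derive from the one
 * chip revision id (helpers in tg3.h): tg3_asic_rev() is
 * pci_chip_rev_id >> 12 and tg3_chip_rev() is pci_chip_rev_id >> 8.
 * An ASIC rev therefore names a chip family, a chip rev names a
 * stepping such as CHIPREV_5750_AX, and the full chip rev id names an
 * exact revision such as CHIPREV_ID_5750_A0.
 */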
16159
16160 static bool tg3_10_100_only_device(struct tg3 *tp,
16161 const struct pci_device_id *ent)
16162 {
16163 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16164
16165 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16166 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16167 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16168 return true;
16169
16170 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16171 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16172 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16173 return true;
16174 } else {
16175 return true;
16176 }
16177 }
16178
16179 return false;
16180 }
16181
16182 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16183 {
16184 u32 misc_ctrl_reg;
16185 u32 pci_state_reg, grc_misc_cfg;
16186 u32 val;
16187 u16 pci_cmd;
16188 int err;
16189
16190 /* Force memory write invalidate off. If we leave it on,
16191 * then on 5700_BX chips we have to enable a workaround.
16192 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16193 * to match the cacheline size. The Broadcom driver has this
16194 * workaround but turns MWI off at all times and so never uses
16195 * it. This seems to suggest that the workaround is insufficient.
16196 */
16197 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16198 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16199 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16200
16201 /* Important! -- Make sure register accesses are byteswapped
16202 * correctly. Also, for those chips that require it, make
16203 * sure that indirect register accesses are enabled before
16204 * the first operation.
16205 */
16206 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16207 &misc_ctrl_reg);
16208 tp->misc_host_ctrl |= (misc_ctrl_reg &
16209 MISC_HOST_CTRL_CHIPREV);
16210 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16211 tp->misc_host_ctrl);
16212
16213 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16214
16215 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16216 * we need to disable memory accesses and use configuration
16217 * cycles only to access all registers. The 5702/03 chips
16218 * can mistakenly decode the special cycles from the
16219 * ICH chipsets as memory write cycles, causing corruption
16220 * of register and memory space. Only certain ICH bridges
16221 * will drive special cycles with non-zero data during the
16222 * address phase which can fall within the 5703's address
16223 * range. This is not an ICH bug as the PCI spec allows
16224 * non-zero address during special cycles. However, only
16225 * these ICH bridges are known to drive non-zero addresses
16226 * during special cycles.
16227 *
16228 * Since special cycles do not cross PCI bridges, we only
16229 * enable this workaround if the 5703 is on the secondary
16230 * bus of these ICH bridges.
16231 */
16232 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16233 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16234 static struct tg3_dev_id {
16235 u32 vendor;
16236 u32 device;
16237 u32 rev;
16238 } ich_chipsets[] = {
16239 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16240 PCI_ANY_ID },
16241 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16242 PCI_ANY_ID },
16243 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16244 0xa },
16245 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16246 PCI_ANY_ID },
16247 { },
16248 };
16249 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16250 struct pci_dev *bridge = NULL;
16251
16252 while (pci_id->vendor != 0) {
16253 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16254 bridge);
16255 if (!bridge) {
16256 pci_id++;
16257 continue;
16258 }
16259 if (pci_id->rev != PCI_ANY_ID) {
16260 if (bridge->revision > pci_id->rev)
16261 continue;
16262 }
16263 if (bridge->subordinate &&
16264 (bridge->subordinate->number ==
16265 tp->pdev->bus->number)) {
16266 tg3_flag_set(tp, ICH_WORKAROUND);
16267 pci_dev_put(bridge);
16268 break;
16269 }
16270 }
16271 }
16272
16273 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16274 static struct tg3_dev_id {
16275 u32 vendor;
16276 u32 device;
16277 } bridge_chipsets[] = {
16278 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16279 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16280 { },
16281 };
16282 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16283 struct pci_dev *bridge = NULL;
16284
16285 while (pci_id->vendor != 0) {
16286 bridge = pci_get_device(pci_id->vendor,
16287 pci_id->device,
16288 bridge);
16289 if (!bridge) {
16290 pci_id++;
16291 continue;
16292 }
16293 if (bridge->subordinate &&
16294 (bridge->subordinate->number <=
16295 tp->pdev->bus->number) &&
16296 (bridge->subordinate->busn_res.end >=
16297 tp->pdev->bus->number)) {
16298 tg3_flag_set(tp, 5701_DMA_BUG);
16299 pci_dev_put(bridge);
16300 break;
16301 }
16302 }
16303 }
16304
16305 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16306 * DMA addresses > 40-bit. This bridge may have other additional
16307 * 57xx devices behind it in some 4-port NIC designs for example.
16308 * Any tg3 device found behind the bridge will also need the 40-bit
16309 * DMA workaround.
16310 */
16311 if (tg3_flag(tp, 5780_CLASS)) {
16312 tg3_flag_set(tp, 40BIT_DMA_BUG);
16313 tp->msi_cap = tp->pdev->msi_cap;
16314 } else {
16315 struct pci_dev *bridge = NULL;
16316
16317 do {
16318 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16319 PCI_DEVICE_ID_SERVERWORKS_EPB,
16320 bridge);
16321 if (bridge && bridge->subordinate &&
16322 (bridge->subordinate->number <=
16323 tp->pdev->bus->number) &&
16324 (bridge->subordinate->busn_res.end >=
16325 tp->pdev->bus->number)) {
16326 tg3_flag_set(tp, 40BIT_DMA_BUG);
16327 pci_dev_put(bridge);
16328 break;
16329 }
16330 } while (bridge);
16331 }
16332
16333 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16334 tg3_asic_rev(tp) == ASIC_REV_5714)
16335 tp->pdev_peer = tg3_find_peer(tp);
16336
16337 /* Determine TSO capabilities */
16338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16339 ; /* Do nothing. HW bug. */
16340 else if (tg3_flag(tp, 57765_PLUS))
16341 tg3_flag_set(tp, HW_TSO_3);
16342 else if (tg3_flag(tp, 5755_PLUS) ||
16343 tg3_asic_rev(tp) == ASIC_REV_5906)
16344 tg3_flag_set(tp, HW_TSO_2);
16345 else if (tg3_flag(tp, 5750_PLUS)) {
16346 tg3_flag_set(tp, HW_TSO_1);
16347 tg3_flag_set(tp, TSO_BUG);
16348 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16349 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16350 tg3_flag_clear(tp, TSO_BUG);
16351 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16352 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16353 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16354 tg3_flag_set(tp, FW_TSO);
16355 tg3_flag_set(tp, TSO_BUG);
16356 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16357 tp->fw_needed = FIRMWARE_TG3TSO5;
16358 else
16359 tp->fw_needed = FIRMWARE_TG3TSO;
16360 }
16361
16362 /* Selectively allow TSO based on operating conditions */
16363 if (tg3_flag(tp, HW_TSO_1) ||
16364 tg3_flag(tp, HW_TSO_2) ||
16365 tg3_flag(tp, HW_TSO_3) ||
16366 tg3_flag(tp, FW_TSO)) {
16367 /* For firmware TSO, assume ASF is disabled.
16368 * We'll disable TSO later if we discover ASF
16369 * is enabled in tg3_get_eeprom_hw_cfg().
16370 */
16371 tg3_flag_set(tp, TSO_CAPABLE);
16372 } else {
16373 tg3_flag_clear(tp, TSO_CAPABLE);
16374 tg3_flag_clear(tp, TSO_BUG);
16375 tp->fw_needed = NULL;
16376 }
16377
16378 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16379 tp->fw_needed = FIRMWARE_TG3;
16380
16381 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16382 tp->fw_needed = FIRMWARE_TG357766;
16383
16384 tp->irq_max = 1;
16385
16386 if (tg3_flag(tp, 5750_PLUS)) {
16387 tg3_flag_set(tp, SUPPORT_MSI);
16388 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16389 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16390 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16391 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16392 tp->pdev_peer == tp->pdev))
16393 tg3_flag_clear(tp, SUPPORT_MSI);
16394
16395 if (tg3_flag(tp, 5755_PLUS) ||
16396 tg3_asic_rev(tp) == ASIC_REV_5906) {
16397 tg3_flag_set(tp, 1SHOT_MSI);
16398 }
16399
16400 if (tg3_flag(tp, 57765_PLUS)) {
16401 tg3_flag_set(tp, SUPPORT_MSIX);
16402 tp->irq_max = TG3_IRQ_MAX_VECS;
16403 }
16404 }
16405
16406 tp->txq_max = 1;
16407 tp->rxq_max = 1;
16408 if (tp->irq_max > 1) {
16409 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16410 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16411
16412 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16413 tg3_asic_rev(tp) == ASIC_REV_5720)
16414 tp->txq_max = tp->irq_max - 1;
16415 }
16416
16417 if (tg3_flag(tp, 5755_PLUS) ||
16418 tg3_asic_rev(tp) == ASIC_REV_5906)
16419 tg3_flag_set(tp, SHORT_DMA_BUG);
16420
16421 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16422 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16423
16424 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16426 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16427 tg3_asic_rev(tp) == ASIC_REV_5762)
16428 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16429
16430 if (tg3_flag(tp, 57765_PLUS) &&
16431 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16432 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16433
16434 if (!tg3_flag(tp, 5705_PLUS) ||
16435 tg3_flag(tp, 5780_CLASS) ||
16436 tg3_flag(tp, USE_JUMBO_BDFLAG))
16437 tg3_flag_set(tp, JUMBO_CAPABLE);
16438
16439 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16440 &pci_state_reg);
16441
16442 if (pci_is_pcie(tp->pdev)) {
16443 u16 lnkctl;
16444
16445 tg3_flag_set(tp, PCI_EXPRESS);
16446
16447 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16448 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16449 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16450 tg3_flag_clear(tp, HW_TSO_2);
16451 tg3_flag_clear(tp, TSO_CAPABLE);
16452 }
16453 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16454 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16455 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16456 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16457 tg3_flag_set(tp, CLKREQ_BUG);
16458 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16459 tg3_flag_set(tp, L1PLLPD_EN);
16460 }
16461 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16462 /* BCM5785 devices are effectively PCIe devices, and should
16463 * follow PCIe codepaths, but do not have a PCIe capabilities
16464 * section.
16465 */
16466 tg3_flag_set(tp, PCI_EXPRESS);
16467 } else if (!tg3_flag(tp, 5705_PLUS) ||
16468 tg3_flag(tp, 5780_CLASS)) {
16469 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16470 if (!tp->pcix_cap) {
16471 dev_err(&tp->pdev->dev,
16472 "Cannot find PCI-X capability, aborting\n");
16473 return -EIO;
16474 }
16475
16476 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16477 tg3_flag_set(tp, PCIX_MODE);
16478 }
16479
16480 /* If we have an AMD 762 or VIA K8T800 chipset, write
16481 * reordering to the mailbox registers done by the host
16482 * controller can cause major troubles. We read back from
16483 * every mailbox register write to force the writes to be
16484 * posted to the chip in order.
16485 */
16486 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16487 !tg3_flag(tp, PCI_EXPRESS))
16488 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16489
16490 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16491 &tp->pci_cacheline_sz);
16492 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16493 &tp->pci_lat_timer);
16494 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16495 tp->pci_lat_timer < 64) {
16496 tp->pci_lat_timer = 64;
16497 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16498 tp->pci_lat_timer);
16499 }
16500
16501 /* Important! -- It is critical that the PCI-X hw workaround
16502 * situation is decided before the first MMIO register access.
16503 */
16504 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16505 /* 5700 BX chips need to have their TX producer index
16506 * mailboxes written twice to work around a bug.
16507 */
16508 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16509
16510 /* If we are in PCI-X mode, enable register write workaround.
16511 *
16512 * The workaround is to use indirect register accesses
16513 * for all chip writes not to mailbox registers.
16514 */
16515 if (tg3_flag(tp, PCIX_MODE)) {
16516 u32 pm_reg;
16517
16518 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16519
16520 /* The chip can have its power management PCI config
16521 * space registers clobbered due to this bug.
16522 * So explicitly force the chip into D0 here.
16523 */
16524 pci_read_config_dword(tp->pdev,
16525 tp->pdev->pm_cap + PCI_PM_CTRL,
16526 &pm_reg);
16527 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16528 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16529 pci_write_config_dword(tp->pdev,
16530 tp->pdev->pm_cap + PCI_PM_CTRL,
16531 pm_reg);
16532
16533 /* Also, force SERR#/PERR# in PCI command. */
16534 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16535 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16536 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16537 }
16538 }
16539
16540 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16541 tg3_flag_set(tp, PCI_HIGH_SPEED);
16542 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16543 tg3_flag_set(tp, PCI_32BIT);
16544
16545 /* Chip-specific fixup from Broadcom driver */
16546 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16547 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16548 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16549 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16550 }
16551
16552 /* Default fast path register access methods */
16553 tp->read32 = tg3_read32;
16554 tp->write32 = tg3_write32;
16555 tp->read32_mbox = tg3_read32;
16556 tp->write32_mbox = tg3_write32;
16557 tp->write32_tx_mbox = tg3_write32;
16558 tp->write32_rx_mbox = tg3_write32;
16559
16560 /* Various workaround register access methods */
16561 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16562 tp->write32 = tg3_write_indirect_reg32;
16563 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16564 (tg3_flag(tp, PCI_EXPRESS) &&
16565 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16566 /*
16567 * Back to back register writes can cause problems on these
16568 * chips, the workaround is to read back all reg writes
16569 * except those to mailbox regs.
16570 *
16571 * See tg3_write_indirect_reg32().
16572 */
16573 tp->write32 = tg3_write_flush_reg32;
16574 }
16575
16576 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16577 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16578 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16579 tp->write32_rx_mbox = tg3_write_flush_reg32;
16580 }
16581
16582 if (tg3_flag(tp, ICH_WORKAROUND)) {
16583 tp->read32 = tg3_read_indirect_reg32;
16584 tp->write32 = tg3_write_indirect_reg32;
16585 tp->read32_mbox = tg3_read_indirect_mbox;
16586 tp->write32_mbox = tg3_write_indirect_mbox;
16587 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16588 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16589
16590 iounmap(tp->regs);
16591 tp->regs = NULL;
16592
16593 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16594 pci_cmd &= ~PCI_COMMAND_MEMORY;
16595 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16596 }
16597 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16598 tp->read32_mbox = tg3_read32_mbox_5906;
16599 tp->write32_mbox = tg3_write32_mbox_5906;
16600 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16601 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16602 }
16603
16604 if (tp->write32 == tg3_write_indirect_reg32 ||
16605 (tg3_flag(tp, PCIX_MODE) &&
16606 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16607 tg3_asic_rev(tp) == ASIC_REV_5701)))
16608 tg3_flag_set(tp, SRAM_USE_CONFIG);
16609
16610 /* The memory arbiter has to be enabled in order for SRAM accesses
16611 * to succeed. Normally on powerup the tg3 chip firmware will make
16612 * sure it is enabled, but other entities such as system netboot
16613 * code might disable it.
16614 */
16615 val = tr32(MEMARB_MODE);
16616 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16617
16618 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16619 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16620 tg3_flag(tp, 5780_CLASS)) {
16621 if (tg3_flag(tp, PCIX_MODE)) {
16622 pci_read_config_dword(tp->pdev,
16623 tp->pcix_cap + PCI_X_STATUS,
16624 &val);
16625 tp->pci_fn = val & 0x7;
16626 }
16627 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16628 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16629 tg3_asic_rev(tp) == ASIC_REV_5720) {
16630 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16631 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16632 val = tr32(TG3_CPMU_STATUS);
16633
16634 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16635 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16636 else
16637 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16638 TG3_CPMU_STATUS_FSHFT_5719;
16639 }
16640
16641 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16642 tp->write32_tx_mbox = tg3_write_flush_reg32;
16643 tp->write32_rx_mbox = tg3_write_flush_reg32;
16644 }
16645
16646 /* Get eeprom hw config before calling tg3_set_power_state().
16647 * In particular, the TG3_FLAG_IS_NIC flag must be
16648 * determined before calling tg3_set_power_state() so that
16649 * we know whether or not to switch out of Vaux power.
16650 * When the flag is set, it means that GPIO1 is used for eeprom
16651 * write protect and also implies that it is a LOM where GPIOs
16652 * are not used to switch power.
16653 */
16654 tg3_get_eeprom_hw_cfg(tp);
16655
16656 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16657 tg3_flag_clear(tp, TSO_CAPABLE);
16658 tg3_flag_clear(tp, TSO_BUG);
16659 tp->fw_needed = NULL;
16660 }
16661
16662 if (tg3_flag(tp, ENABLE_APE)) {
16663 /* Allow reads and writes to the
16664 * APE register and memory space.
16665 */
16666 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16667 PCISTATE_ALLOW_APE_SHMEM_WR |
16668 PCISTATE_ALLOW_APE_PSPACE_WR;
16669 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16670 pci_state_reg);
16671
16672 tg3_ape_lock_init(tp);
16673 }
16674
16675 /* Set up tp->grc_local_ctrl before calling
16676 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16677 * will bring 5700's external PHY out of reset.
16678 * It is also used as eeprom write protect on LOMs.
16679 */
16680 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16681 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16682 tg3_flag(tp, EEPROM_WRITE_PROT))
16683 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16684 GRC_LCLCTRL_GPIO_OUTPUT1);
16685 /* Unused GPIO3 must be driven as output on 5752 because there
16686 * are no pull-up resistors on unused GPIO pins.
16687 */
16688 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16689 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16690
16691 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16692 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16693 tg3_flag(tp, 57765_CLASS))
16694 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16695
16696 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16697 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16698 /* Turn off the debug UART. */
16699 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16700 if (tg3_flag(tp, IS_NIC))
16701 /* Keep VMain power. */
16702 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16703 GRC_LCLCTRL_GPIO_OUTPUT0;
16704 }
16705
16706 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16707 tp->grc_local_ctrl |=
16708 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16709
16710 /* Switch out of Vaux if it is a NIC */
16711 tg3_pwrsrc_switch_to_vmain(tp);
16712
16713 /* Derive initial jumbo mode from MTU assigned in
16714 * ether_setup() via the alloc_etherdev() call
16715 */
16716 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16717 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16718
16719 /* Determine WakeOnLan speed to use. */
16720 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16721 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16722 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16723 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16724 tg3_flag_clear(tp, WOL_SPEED_100MB);
16725 } else {
16726 tg3_flag_set(tp, WOL_SPEED_100MB);
16727 }
16728
16729 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16730 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16731
16732 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16733 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16734 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16735 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16736 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16737 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16738 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16739 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16740
16741 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16742 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16743 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16745 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16746
16747 if (tg3_flag(tp, 5705_PLUS) &&
16748 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16749 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16750 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16751 !tg3_flag(tp, 57765_PLUS)) {
16752 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16753 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16754 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16755 tg3_asic_rev(tp) == ASIC_REV_5761) {
16756 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16757 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16758 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16759 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16760 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16761 } else
16762 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16763 }
16764
16765 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16766 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16767 tp->phy_otp = tg3_read_otp_phycfg(tp);
16768 if (tp->phy_otp == 0)
16769 tp->phy_otp = TG3_OTP_DEFAULT;
16770 }
16771
16772 if (tg3_flag(tp, CPMU_PRESENT))
16773 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16774 else
16775 tp->mi_mode = MAC_MI_MODE_BASE;
16776
16777 tp->coalesce_mode = 0;
16778 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16779 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16780 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16781
16782 /* Set these bits to enable the statistics workaround. */
16783 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16784 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16785 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16786 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16787 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16788 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16789 }
16790
16791 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16792 tg3_asic_rev(tp) == ASIC_REV_57780)
16793 tg3_flag_set(tp, USE_PHYLIB);
16794
16795 err = tg3_mdio_init(tp);
16796 if (err)
16797 return err;
16798
16799 /* Initialize data/descriptor byte/word swapping. */
16800 val = tr32(GRC_MODE);
16801 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16802 tg3_asic_rev(tp) == ASIC_REV_5762)
16803 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16804 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16805 GRC_MODE_B2HRX_ENABLE |
16806 GRC_MODE_HTX2B_ENABLE |
16807 GRC_MODE_HOST_STACKUP);
16808 else
16809 val &= GRC_MODE_HOST_STACKUP;
16810
16811 tw32(GRC_MODE, val | tp->grc_mode);
16812
16813 tg3_switch_clocks(tp);
16814
16815 /* Clear this out for sanity. */
16816 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16817
16818 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16819 tw32(TG3PCI_REG_BASE_ADDR, 0);
16820
16821 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16822 &pci_state_reg);
16823 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16824 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16825 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16826 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16827 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16828 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16829 void __iomem *sram_base;
16830
16831 /* Write some dummy words into the SRAM status block
16832 * area, see if it reads back correctly. If the return
16833 * value is bad, force enable the PCIX workaround.
16834 */
16835 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16836
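/* Word 0 is cleared, then word 1 is written twice (0, then
 * 0xffffffff).  If word 0 no longer reads back as zero, the chip
 * mis-targeted a write and the PCI-X workaround must be forced on.
 */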
16837 writel(0x00000000, sram_base);
16838 writel(0x00000000, sram_base + 4);
16839 writel(0xffffffff, sram_base + 4);
16840 if (readl(sram_base) != 0x00000000)
16841 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16842 }
16843 }
16844
16845 udelay(50);
16846 tg3_nvram_init(tp);
16847
16848 /* If the device has an NVRAM, no need to load patch firmware */
16849 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16850 !tg3_flag(tp, NO_NVRAM))
16851 tp->fw_needed = NULL;
16852
16853 grc_misc_cfg = tr32(GRC_MISC_CFG);
16854 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16855
16856 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16857 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16858 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16859 tg3_flag_set(tp, IS_5788);
16860
16861 if (!tg3_flag(tp, IS_5788) &&
16862 tg3_asic_rev(tp) != ASIC_REV_5700)
16863 tg3_flag_set(tp, TAGGED_STATUS);
16864 if (tg3_flag(tp, TAGGED_STATUS)) {
16865 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16866 HOSTCC_MODE_CLRTICK_TXBD);
16867
16868 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16869 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16870 tp->misc_host_ctrl);
16871 }
16872
16873 /* Preserve the APE MAC_MODE bits */
16874 if (tg3_flag(tp, ENABLE_APE))
16875 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16876 else
16877 tp->mac_mode = 0;
16878
16879 if (tg3_10_100_only_device(tp, ent))
16880 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16881
16882 err = tg3_phy_probe(tp);
16883 if (err) {
16884 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16885 /* ... but do not return immediately: clean up MDIO and fall through; err is returned at the end. */
16886 tg3_mdio_fini(tp);
16887 }
16888
16889 tg3_read_vpd(tp);
16890 tg3_read_fw_ver(tp);
16891
16892 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16893 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16894 } else {
16895 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16896 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16897 else
16898 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16899 }
16900
16901 /* 5700 {AX,BX} chips have a broken status block link
16902 * change bit implementation, so we must use the
16903 * status register in those cases.
16904 */
16905 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16906 tg3_flag_set(tp, USE_LINKCHG_REG);
16907 else
16908 tg3_flag_clear(tp, USE_LINKCHG_REG);
16909
16910 /* The led_ctrl is set during tg3_phy_probe; here we might
16911 * have to force the link status polling mechanism based
16912 * upon subsystem IDs.
16913 */
16914 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16915 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16916 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16917 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16918 tg3_flag_set(tp, USE_LINKCHG_REG);
16919 }
16920
16921 /* For all SERDES we poll the MAC status register. */
16922 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16923 tg3_flag_set(tp, POLL_SERDES);
16924 else
16925 tg3_flag_clear(tp, POLL_SERDES);
16926
16927 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16928 tg3_flag_set(tp, POLL_CPMU_LINK);
16929
16930 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16931 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16932 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16933 tg3_flag(tp, PCIX_MODE)) {
16934 tp->rx_offset = NET_SKB_PAD;
16935 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16936 tp->rx_copy_thresh = ~(u16)0;
16937 #endif
16938 }
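/* Note: the 5701 in PCI-X mode reportedly cannot DMA to 2-byte-aligned
 * buffers, hence no NET_IP_ALIGN above; without efficient unaligned
 * access, rx_copy_thresh is maxed so every packet is copied instead.
 */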
16939
16940 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16941 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16942 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16943
16944 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16945
16946 /* Increment the rx prod index on the rx std ring by at most
16947 * 8 for these chips to work around hw errata.
16948 */
16949 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16950 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16951 tg3_asic_rev(tp) == ASIC_REV_5755)
16952 tp->rx_std_max_post = 8;
16953
16954 if (tg3_flag(tp, ASPM_WORKAROUND))
16955 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16956 PCIE_PWR_MGMT_L1_THRESH_MSK;
16957
16958 return err;
16959 }
16960
16961 #ifdef CONFIG_SPARC
16962 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16963 {
16964 struct net_device *dev = tp->dev;
16965 struct pci_dev *pdev = tp->pdev;
16966 struct device_node *dp = pci_device_to_OF_node(pdev);
16967 const unsigned char *addr;
16968 int len;
16969
16970 addr = of_get_property(dp, "local-mac-address", &len);
16971 if (addr && len == ETH_ALEN) {
16972 memcpy(dev->dev_addr, addr, ETH_ALEN);
16973 return 0;
16974 }
16975 return -ENODEV;
16976 }
16977
16978 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16979 {
16980 struct net_device *dev = tp->dev;
16981
16982 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16983 return 0;
16984 }
16985 #endif
16986
16987 static int tg3_get_device_address(struct tg3 *tp)
16988 {
16989 struct net_device *dev = tp->dev;
16990 u32 hi, lo, mac_offset;
16991 int addr_ok = 0;
16992 int err;
16993
16994 #ifdef CONFIG_SPARC
16995 if (!tg3_get_macaddr_sparc(tp))
16996 return 0;
16997 #endif
16998
16999 if (tg3_flag(tp, IS_SSB_CORE)) {
17000 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17001 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17002 return 0;
17003 }
17004
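/* NVRAM location of the MAC address: 0x7c by default, 0xcc for the
 * second MAC of dual-MAC 5704/5780 parts, a per-function offset on
 * 5717-class parts, and 0x10 on the 5906.
 */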
17005 mac_offset = 0x7c;
17006 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17007 tg3_flag(tp, 5780_CLASS)) {
17008 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17009 mac_offset = 0xcc;
17010 if (tg3_nvram_lock(tp))
17011 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17012 else
17013 tg3_nvram_unlock(tp);
17014 } else if (tg3_flag(tp, 5717_PLUS)) {
17015 if (tp->pci_fn & 1)
17016 mac_offset = 0xcc;
17017 if (tp->pci_fn > 1)
17018 mac_offset += 0x18c;
17019 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17020 mac_offset = 0x10;
17021
17022 /* First try to get it from MAC address mailbox. */
17023 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
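/* 0x484b is ASCII "HK", apparently the bootcode's signature marking
 * a valid MAC address in the mailbox.
 */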
17024 if ((hi >> 16) == 0x484b) {
17025 dev->dev_addr[0] = (hi >> 8) & 0xff;
17026 dev->dev_addr[1] = (hi >> 0) & 0xff;
17027
17028 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17029 dev->dev_addr[2] = (lo >> 24) & 0xff;
17030 dev->dev_addr[3] = (lo >> 16) & 0xff;
17031 dev->dev_addr[4] = (lo >> 8) & 0xff;
17032 dev->dev_addr[5] = (lo >> 0) & 0xff;
17033
17034 /* Some old bootcode may report a 0 MAC address in SRAM */
17035 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17036 }
17037 if (!addr_ok) {
17038 /* Next, try NVRAM. */
17039 if (!tg3_flag(tp, NO_NVRAM) &&
17040 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17041 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
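/* hi and lo are big-endian words: the last two bytes of hi are MAC
 * bytes 0-1 (hence the +2), and lo carries MAC bytes 2-5.
 */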
17042 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17043 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17044 }
17045 /* Finally just fetch it out of the MAC control regs. */
17046 else {
17047 hi = tr32(MAC_ADDR_0_HIGH);
17048 lo = tr32(MAC_ADDR_0_LOW);
17049
17050 dev->dev_addr[5] = lo & 0xff;
17051 dev->dev_addr[4] = (lo >> 8) & 0xff;
17052 dev->dev_addr[3] = (lo >> 16) & 0xff;
17053 dev->dev_addr[2] = (lo >> 24) & 0xff;
17054 dev->dev_addr[1] = hi & 0xff;
17055 dev->dev_addr[0] = (hi >> 8) & 0xff;
17056 }
17057 }
17058
17059 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17060 #ifdef CONFIG_SPARC
17061 if (!tg3_get_default_macaddr_sparc(tp))
17062 return 0;
17063 #endif
17064 return -EINVAL;
17065 }
17066 return 0;
17067 }
17068
17069 #define BOUNDARY_SINGLE_CACHELINE 1
17070 #define BOUNDARY_MULTI_CACHELINE 2
17071
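/* Compute the DMA read/write boundary bits to fold into DMA_RWCTRL.
 * Worked example: on a sparc64 host (goal = BOUNDARY_SINGLE_CACHELINE)
 * with a 64-byte cache line, a 5700/5701 on plain PCI lands in the
 * final switch and picks up DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64, breaking bursts at cache-line boundaries.
 */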
17072 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17073 {
17074 int cacheline_size;
17075 u8 byte;
17076 int goal;
17077
17078 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17079 if (byte == 0)
17080 cacheline_size = 1024;
17081 else
17082 cacheline_size = (int) byte * 4;
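/* PCI_CACHE_LINE_SIZE is in 32-bit words, hence the multiply by 4;
 * zero means it was never programmed, so assume the 1024-byte worst
 * case.
 */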
17083
17084 /* On 5703 and later chips, the boundary bits have no
17085 * effect.
17086 */
17087 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17088 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17089 !tg3_flag(tp, PCI_EXPRESS))
17090 goto out;
17091
17092 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17093 goal = BOUNDARY_MULTI_CACHELINE;
17094 #else
17095 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17096 goal = BOUNDARY_SINGLE_CACHELINE;
17097 #else
17098 goal = 0;
17099 #endif
17100 #endif
17101
17102 if (tg3_flag(tp, 57765_PLUS)) {
17103 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17104 goto out;
17105 }
17106
17107 if (!goal)
17108 goto out;
17109
17110 /* PCI controllers on most RISC systems tend to disconnect
17111 * when a device tries to burst across a cache-line boundary.
17112 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17113 *
17114 * Unfortunately, for PCI-E there are only limited
17115 * write-side controls for this, and thus for reads
17116 * we will still get the disconnects. We'll also waste
17117 * these PCI cycles for both read and write for chips
17118 * other than 5700 and 5701 which do not implement the
17119 * boundary bits.
17120 */
17121 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17122 switch (cacheline_size) {
17123 case 16:
17124 case 32:
17125 case 64:
17126 case 128:
17127 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17128 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17129 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17130 } else {
17131 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17132 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17133 }
17134 break;
17135
17136 case 256:
17137 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17138 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17139 break;
17140
17141 default:
17142 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17143 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17144 break;
17145 }
17146 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17147 switch (cacheline_size) {
17148 case 16:
17149 case 32:
17150 case 64:
17151 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17152 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17153 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17154 break;
17155 }
17156 /* fallthrough */
17157 case 128:
17158 default:
17159 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17160 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17161 break;
17162 }
17163 } else {
17164 switch (cacheline_size) {
17165 case 16:
17166 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17167 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17168 DMA_RWCTRL_WRITE_BNDRY_16);
17169 break;
17170 }
17171 /* fallthrough */
17172 case 32:
17173 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17174 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17175 DMA_RWCTRL_WRITE_BNDRY_32);
17176 break;
17177 }
17178 /* fallthrough */
17179 case 64:
17180 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17181 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17182 DMA_RWCTRL_WRITE_BNDRY_64);
17183 break;
17184 }
17185 /* fallthrough */
17186 case 128:
17187 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17188 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17189 DMA_RWCTRL_WRITE_BNDRY_128);
17190 break;
17191 }
17192 /* fallthrough */
17193 case 256:
17194 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17195 DMA_RWCTRL_WRITE_BNDRY_256);
17196 break;
17197 case 512:
17198 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17199 DMA_RWCTRL_WRITE_BNDRY_512);
17200 break;
17201 case 1024:
17202 default:
17203 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17204 DMA_RWCTRL_WRITE_BNDRY_1024);
17205 break;
17206 }
17207 }
17208
17209 out:
17210 return val;
17211 }
17212
17213 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17214 int size, bool to_device)
17215 {
17216 struct tg3_internal_buffer_desc test_desc;
17217 u32 sram_dma_descs;
17218 int i, ret;
17219
17220 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17221
17222 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17223 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17224 tw32(RDMAC_STATUS, 0);
17225 tw32(WDMAC_STATUS, 0);
17226
17227 tw32(BUFMGR_MODE, 0);
17228 tw32(FTQ_RESET, 0);
17229
17230 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17231 test_desc.addr_lo = buf_dma & 0xffffffff;
17232 test_desc.nic_mbuf = 0x00002100;
17233 test_desc.len = size;
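/* The descriptor's host side is buf_dma; nic_mbuf (0x00002100) appears
 * to name the NIC-internal buffer used as the other endpoint, and
 * cqid_sqid below selects the completion/send queues per direction.
 */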
17234
17235 /*
17236 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17237 * the *second* time the tg3 driver was getting loaded after an
17238 * initial scan.
17239 *
17240 * Broadcom tells me:
17241 * ...the DMA engine is connected to the GRC block and a DMA
17242 * reset may affect the GRC block in some unpredictable way...
17243 * The behavior of resets to individual blocks has not been tested.
17244 *
17245 * Broadcom noted the GRC reset will also reset all sub-components.
17246 */
17247 if (to_device) {
17248 test_desc.cqid_sqid = (13 << 8) | 2;
17249
17250 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17251 udelay(40);
17252 } else {
17253 test_desc.cqid_sqid = (16 << 8) | 7;
17254
17255 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17256 udelay(40);
17257 }
17258 test_desc.flags = 0x00000005;
17259
17260 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17261 u32 val;
17262
17263 val = *(((u32 *)&test_desc) + i);
17264 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17265 sram_dma_descs + (i * sizeof(u32)));
17266 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17267 }
17268 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17269
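/* The descriptor image now lives at sram_dma_descs; writing that
 * address into the read- or write-side FTQ starts the transfer.
 */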
17270 if (to_device)
17271 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17272 else
17273 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17274
17275 ret = -ENODEV;
17276 for (i = 0; i < 40; i++) {
17277 u32 val;
17278
17279 if (to_device)
17280 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17281 else
17282 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17283 if ((val & 0xffff) == sram_dma_descs) {
17284 ret = 0;
17285 break;
17286 }
17287
17288 udelay(100);
17289 }
17290
17291 return ret;
17292 }
17293
17294 #define TEST_BUFFER_SIZE 0x2000
17295
17296 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17297 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17298 { },
17299 };
17300
17301 static int tg3_test_dma(struct tg3 *tp)
17302 {
17303 dma_addr_t buf_dma;
17304 u32 *buf, saved_dma_rwctrl;
17305 int ret = 0;
17306
17307 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17308 &buf_dma, GFP_KERNEL);
17309 if (!buf) {
17310 ret = -ENOMEM;
17311 goto out_nofree;
17312 }
17313
17314 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17315 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
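/* The 0x7/0x6 values are apparently the PCI bus command codes to use:
 * Memory Write (0111b) and Memory Read (0110b) respectively.
 */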
17316
17317 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17318
17319 if (tg3_flag(tp, 57765_PLUS))
17320 goto out;
17321
17322 if (tg3_flag(tp, PCI_EXPRESS)) {
17323 /* DMA read watermark not used on PCIE */
17324 tp->dma_rwctrl |= 0x00180000;
17325 } else if (!tg3_flag(tp, PCIX_MODE)) {
17326 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17327 tg3_asic_rev(tp) == ASIC_REV_5750)
17328 tp->dma_rwctrl |= 0x003f0000;
17329 else
17330 tp->dma_rwctrl |= 0x003f000f;
17331 } else {
17332 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17333 tg3_asic_rev(tp) == ASIC_REV_5704) {
17334 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17335 u32 read_water = 0x7;
17336
17337 /* If the 5704 is behind the EPB bridge, we can
17338 * do the less restrictive ONE_DMA workaround for
17339 * better performance.
17340 */
17341 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17342 tg3_asic_rev(tp) == ASIC_REV_5704)
17343 tp->dma_rwctrl |= 0x8000;
17344 else if (ccval == 0x6 || ccval == 0x7)
17345 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17346
17347 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17348 read_water = 4;
17349 /* Set bit 23 to enable PCIX hw bug fix */
17350 tp->dma_rwctrl |=
17351 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17352 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17353 (1 << 23);
17354 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17355 /* 5780 always in PCIX mode */
17356 tp->dma_rwctrl |= 0x00144000;
17357 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17358 /* 5714 always in PCIX mode */
17359 tp->dma_rwctrl |= 0x00148000;
17360 } else {
17361 tp->dma_rwctrl |= 0x001b000f;
17362 }
17363 }
17364 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17365 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17366
17367 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17368 tg3_asic_rev(tp) == ASIC_REV_5704)
17369 tp->dma_rwctrl &= 0xfffffff0;
17370
17371 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17372 tg3_asic_rev(tp) == ASIC_REV_5701) {
17373 /* Remove this if it causes problems for some boards. */
17374 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17375
17376 /* On 5700/5701 chips, we need to set this bit.
17377 * Otherwise the chip will issue cacheline transactions
17378 * to streamable DMA memory with not all the byte
17379 * enables turned on. This is an error on several
17380 * RISC PCI controllers, in particular sparc64.
17381 *
17382 * On 5703/5704 chips, this bit has been reassigned
17383 * a different meaning. In particular, it is used
17384 * on those chips to enable a PCI-X workaround.
17385 */
17386 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17387 }
17388
17389 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17390
17391
17392 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17393 tg3_asic_rev(tp) != ASIC_REV_5701)
17394 goto out;
17395
17396 /* It is best to perform DMA test with maximum write burst size
17397 * to expose the 5700/5701 write DMA bug.
17398 */
17399 saved_dma_rwctrl = tp->dma_rwctrl;
17400 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17401 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17402
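/* This loop runs at most twice: on the first corruption the write
 * boundary is tightened to 16 bytes and the test retried; corruption
 * with the 16-byte boundary already in place is fatal.
 */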
17403 while (1) {
17404 u32 *p = buf, i;
17405
17406 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17407 p[i] = i;
17408
17409 /* Send the buffer to the chip. */
17410 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17411 if (ret) {
17412 dev_err(&tp->pdev->dev,
17413 "%s: Buffer write failed. err = %d\n",
17414 __func__, ret);
17415 break;
17416 }
17417
17418 /* Now read it back. */
17419 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17420 if (ret) {
17421 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17422 "err = %d\n", __func__, ret);
17423 break;
17424 }
17425
17426 /* Verify it. */
17427 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17428 if (p[i] == i)
17429 continue;
17430
17431 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17432 DMA_RWCTRL_WRITE_BNDRY_16) {
17433 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17434 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17435 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17436 break;
17437 } else {
17438 dev_err(&tp->pdev->dev,
17439 "%s: Buffer corrupted on read back! "
17440 "(%d != %d)\n", __func__, p[i], i);
17441 ret = -ENODEV;
17442 goto out;
17443 }
17444 }
17445
17446 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17447 /* Success. */
17448 ret = 0;
17449 break;
17450 }
17451 }
17452 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17453 DMA_RWCTRL_WRITE_BNDRY_16) {
17454 /* DMA test passed without adjusting DMA boundary,
17455 * now look for chipsets that are known to expose the
17456 * DMA bug without failing the test.
17457 */
17458 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17459 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17460 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17461 } else {
17462 /* Safe to use the calculated DMA boundary. */
17463 tp->dma_rwctrl = saved_dma_rwctrl;
17464 }
17465
17466 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17467 }
17468
17469 out:
17470 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17471 out_nofree:
17472 return ret;
17473 }
17474
17475 static void tg3_init_bufmgr_config(struct tg3 *tp)
17476 {
17477 if (tg3_flag(tp, 57765_PLUS)) {
17478 tp->bufmgr_config.mbuf_read_dma_low_water =
17479 DEFAULT_MB_RDMA_LOW_WATER_5705;
17480 tp->bufmgr_config.mbuf_mac_rx_low_water =
17481 DEFAULT_MB_MACRX_LOW_WATER_57765;
17482 tp->bufmgr_config.mbuf_high_water =
17483 DEFAULT_MB_HIGH_WATER_57765;
17484
17485 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17486 DEFAULT_MB_RDMA_LOW_WATER_5705;
17487 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17488 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17489 tp->bufmgr_config.mbuf_high_water_jumbo =
17490 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17491 } else if (tg3_flag(tp, 5705_PLUS)) {
17492 tp->bufmgr_config.mbuf_read_dma_low_water =
17493 DEFAULT_MB_RDMA_LOW_WATER_5705;
17494 tp->bufmgr_config.mbuf_mac_rx_low_water =
17495 DEFAULT_MB_MACRX_LOW_WATER_5705;
17496 tp->bufmgr_config.mbuf_high_water =
17497 DEFAULT_MB_HIGH_WATER_5705;
17498 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17499 tp->bufmgr_config.mbuf_mac_rx_low_water =
17500 DEFAULT_MB_MACRX_LOW_WATER_5906;
17501 tp->bufmgr_config.mbuf_high_water =
17502 DEFAULT_MB_HIGH_WATER_5906;
17503 }
17504
17505 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17506 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17507 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17508 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17509 tp->bufmgr_config.mbuf_high_water_jumbo =
17510 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17511 } else {
17512 tp->bufmgr_config.mbuf_read_dma_low_water =
17513 DEFAULT_MB_RDMA_LOW_WATER;
17514 tp->bufmgr_config.mbuf_mac_rx_low_water =
17515 DEFAULT_MB_MACRX_LOW_WATER;
17516 tp->bufmgr_config.mbuf_high_water =
17517 DEFAULT_MB_HIGH_WATER;
17518
17519 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17520 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17521 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17522 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17523 tp->bufmgr_config.mbuf_high_water_jumbo =
17524 DEFAULT_MB_HIGH_WATER_JUMBO;
17525 }
17526
17527 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17528 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17529 }
17530
17531 static char *tg3_phy_string(struct tg3 *tp)
17532 {
17533 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17534 case TG3_PHY_ID_BCM5400: return "5400";
17535 case TG3_PHY_ID_BCM5401: return "5401";
17536 case TG3_PHY_ID_BCM5411: return "5411";
17537 case TG3_PHY_ID_BCM5701: return "5701";
17538 case TG3_PHY_ID_BCM5703: return "5703";
17539 case TG3_PHY_ID_BCM5704: return "5704";
17540 case TG3_PHY_ID_BCM5705: return "5705";
17541 case TG3_PHY_ID_BCM5750: return "5750";
17542 case TG3_PHY_ID_BCM5752: return "5752";
17543 case TG3_PHY_ID_BCM5714: return "5714";
17544 case TG3_PHY_ID_BCM5780: return "5780";
17545 case TG3_PHY_ID_BCM5755: return "5755";
17546 case TG3_PHY_ID_BCM5787: return "5787";
17547 case TG3_PHY_ID_BCM5784: return "5784";
17548 case TG3_PHY_ID_BCM5756: return "5722/5756";
17549 case TG3_PHY_ID_BCM5906: return "5906";
17550 case TG3_PHY_ID_BCM5761: return "5761";
17551 case TG3_PHY_ID_BCM5718C: return "5718C";
17552 case TG3_PHY_ID_BCM5718S: return "5718S";
17553 case TG3_PHY_ID_BCM57765: return "57765";
17554 case TG3_PHY_ID_BCM5719C: return "5719C";
17555 case TG3_PHY_ID_BCM5720C: return "5720C";
17556 case TG3_PHY_ID_BCM5762: return "5762C";
17557 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17558 case 0: return "serdes";
17559 default: return "unknown";
17560 }
17561 }
17562
17563 static char *tg3_bus_string(struct tg3 *tp, char *str)
17564 {
17565 if (tg3_flag(tp, PCI_EXPRESS)) {
17566 strcpy(str, "PCI Express");
17567 return str;
17568 } else if (tg3_flag(tp, PCIX_MODE)) {
17569 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17570
17571 strcpy(str, "PCIX:");
17572
17573 if ((clock_ctrl == 7) ||
17574 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17575 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17576 strcat(str, "133MHz");
17577 else if (clock_ctrl == 0)
17578 strcat(str, "33MHz");
17579 else if (clock_ctrl == 2)
17580 strcat(str, "50MHz");
17581 else if (clock_ctrl == 4)
17582 strcat(str, "66MHz");
17583 else if (clock_ctrl == 6)
17584 strcat(str, "100MHz");
17585 } else {
17586 strcpy(str, "PCI:");
17587 if (tg3_flag(tp, PCI_HIGH_SPEED))
17588 strcat(str, "66MHz");
17589 else
17590 strcat(str, "33MHz");
17591 }
17592 if (tg3_flag(tp, PCI_32BIT))
17593 strcat(str, ":32-bit");
17594 else
17595 strcat(str, ":64-bit");
17596 return str;
17597 }
17598
17599 static void tg3_init_coal(struct tg3 *tp)
17600 {
17601 struct ethtool_coalesce *ec = &tp->coal;
17602
17603 memset(ec, 0, sizeof(*ec));
17604 ec->cmd = ETHTOOL_GCOALESCE;
17605 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17606 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17607 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17608 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17609 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17610 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17611 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17612 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17613 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17614
17615 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17616 HOSTCC_MODE_CLRTICK_TXBD)) {
17617 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17618 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17619 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17620 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17621 }
17622
17623 if (tg3_flag(tp, 5705_PLUS)) {
17624 ec->rx_coalesce_usecs_irq = 0;
17625 ec->tx_coalesce_usecs_irq = 0;
17626 ec->stats_block_coalesce_usecs = 0;
17627 }
17628 }
17629
17630 static int tg3_init_one(struct pci_dev *pdev,
17631 const struct pci_device_id *ent)
17632 {
17633 struct net_device *dev;
17634 struct tg3 *tp;
17635 int i, err;
17636 u32 sndmbx, rcvmbx, intmbx;
17637 char str[40];
17638 u64 dma_mask, persist_dma_mask;
17639 netdev_features_t features = 0;
17640
17641 printk_once(KERN_INFO "%s\n", version);
17642
17643 err = pci_enable_device(pdev);
17644 if (err) {
17645 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17646 return err;
17647 }
17648
17649 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17650 if (err) {
17651 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17652 goto err_out_disable_pdev;
17653 }
17654
17655 pci_set_master(pdev);
17656
17657 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17658 if (!dev) {
17659 err = -ENOMEM;
17660 goto err_out_free_res;
17661 }
17662
17663 SET_NETDEV_DEV(dev, &pdev->dev);
17664
17665 tp = netdev_priv(dev);
17666 tp->pdev = pdev;
17667 tp->dev = dev;
17668 tp->rx_mode = TG3_DEF_RX_MODE;
17669 tp->tx_mode = TG3_DEF_TX_MODE;
17670 tp->irq_sync = 1;
17671 tp->pcierr_recovery = false;
17672
17673 if (tg3_debug > 0)
17674 tp->msg_enable = tg3_debug;
17675 else
17676 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17677
17678 if (pdev_is_ssb_gige_core(pdev)) {
17679 tg3_flag_set(tp, IS_SSB_CORE);
17680 if (ssb_gige_must_flush_posted_writes(pdev))
17681 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17682 if (ssb_gige_one_dma_at_once(pdev))
17683 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17684 if (ssb_gige_have_roboswitch(pdev)) {
17685 tg3_flag_set(tp, USE_PHYLIB);
17686 tg3_flag_set(tp, ROBOSWITCH);
17687 }
17688 if (ssb_gige_is_rgmii(pdev))
17689 tg3_flag_set(tp, RGMII_MODE);
17690 }
17691
17692 /* The word/byte swap controls here control register access byte
17693 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17694 * setting below.
17695 */
17696 tp->misc_host_ctrl =
17697 MISC_HOST_CTRL_MASK_PCI_INT |
17698 MISC_HOST_CTRL_WORD_SWAP |
17699 MISC_HOST_CTRL_INDIR_ACCESS |
17700 MISC_HOST_CTRL_PCISTATE_RW;
17701
17702 /* The NONFRM (non-frame) byte/word swap controls take effect
17703 * on descriptor entries, anything which isn't packet data.
17704 *
17705 * The StrongARM chips on the board (one for tx, one for rx)
17706 * are running in big-endian mode.
17707 */
17708 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17709 GRC_MODE_WSWAP_NONFRM_DATA);
17710 #ifdef __BIG_ENDIAN
17711 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17712 #endif
17713 spin_lock_init(&tp->lock);
17714 spin_lock_init(&tp->indirect_lock);
17715 INIT_WORK(&tp->reset_task, tg3_reset_task);
17716
17717 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17718 if (!tp->regs) {
17719 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17720 err = -ENOMEM;
17721 goto err_out_free_dev;
17722 }
17723
17724 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17725 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17729 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17730 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17731 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17732 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17733 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17734 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17735 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17736 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17737 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17738 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17739 tg3_flag_set(tp, ENABLE_APE);
17740 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17741 if (!tp->aperegs) {
17742 dev_err(&pdev->dev,
17743 "Cannot map APE registers, aborting\n");
17744 err = -ENOMEM;
17745 goto err_out_iounmap;
17746 }
17747 }
17748
17749 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17750 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17751
17752 dev->ethtool_ops = &tg3_ethtool_ops;
17753 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17754 dev->netdev_ops = &tg3_netdev_ops;
17755 dev->irq = pdev->irq;
17756
17757 err = tg3_get_invariants(tp, ent);
17758 if (err) {
17759 dev_err(&pdev->dev,
17760 "Problem fetching invariants of chip, aborting\n");
17761 goto err_out_apeunmap;
17762 }
17763
17764 /* The EPB bridge inside 5714, 5715, and 5780 and any
17765 * device behind the EPB cannot support DMA addresses > 40-bit.
17766 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17767 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17768 * do DMA address check in tg3_start_xmit().
17769 */
17770 if (tg3_flag(tp, IS_5788))
17771 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17772 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17773 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17774 #ifdef CONFIG_HIGHMEM
17775 dma_mask = DMA_BIT_MASK(64);
17776 #endif
17777 } else
17778 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17779
17780 /* Configure DMA attributes. */
17781 if (dma_mask > DMA_BIT_MASK(32)) {
17782 err = pci_set_dma_mask(pdev, dma_mask);
17783 if (!err) {
17784 features |= NETIF_F_HIGHDMA;
17785 err = pci_set_consistent_dma_mask(pdev,
17786 persist_dma_mask);
17787 if (err < 0) {
17788 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17789 "DMA for consistent allocations\n");
17790 goto err_out_apeunmap;
17791 }
17792 }
17793 }
17794 if (err || dma_mask == DMA_BIT_MASK(32)) {
17795 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17796 if (err) {
17797 dev_err(&pdev->dev,
17798 "No usable DMA configuration, aborting\n");
17799 goto err_out_apeunmap;
17800 }
17801 }
17802
17803 tg3_init_bufmgr_config(tp);
17804
17805 /* 5700 B0 chips do not support checksumming correctly due
17806 * to hardware bugs.
17807 */
17808 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17809 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17810
17811 if (tg3_flag(tp, 5755_PLUS))
17812 features |= NETIF_F_IPV6_CSUM;
17813 }
17814
17815 /* TSO is on by default on chips that support hardware TSO.
17816 * Firmware TSO on older chips gives lower performance, so it
17817 * is off by default, but can be enabled using ethtool.
17818 */
17819 if ((tg3_flag(tp, HW_TSO_1) ||
17820 tg3_flag(tp, HW_TSO_2) ||
17821 tg3_flag(tp, HW_TSO_3)) &&
17822 (features & NETIF_F_IP_CSUM))
17823 features |= NETIF_F_TSO;
17824 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17825 if (features & NETIF_F_IPV6_CSUM)
17826 features |= NETIF_F_TSO6;
17827 if (tg3_flag(tp, HW_TSO_3) ||
17828 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17829 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17830 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17831 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17832 tg3_asic_rev(tp) == ASIC_REV_57780)
17833 features |= NETIF_F_TSO_ECN;
17834 }
17835
17836 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17837 NETIF_F_HW_VLAN_CTAG_RX;
17838 dev->vlan_features |= features;
17839
17840 /*
17841 * Add loopback capability only for a subset of devices that support
17842 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17843 * loopback for the remaining devices.
17844 */
17845 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17846 !tg3_flag(tp, CPMU_PRESENT))
17847 /* Add the loopback capability */
17848 features |= NETIF_F_LOOPBACK;
17849
17850 dev->hw_features |= features;
17851 dev->priv_flags |= IFF_UNICAST_FLT;
17852
17853 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17854 !tg3_flag(tp, TSO_CAPABLE) &&
17855 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17856 tg3_flag_set(tp, MAX_RXPEND_64);
17857 tp->rx_pending = 63;
17858 }
17859
17860 err = tg3_get_device_address(tp);
17861 if (err) {
17862 dev_err(&pdev->dev,
17863 "Could not obtain valid ethernet address, aborting\n");
17864 goto err_out_apeunmap;
17865 }
17866
17867 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17868 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17869 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17870 for (i = 0; i < tp->irq_max; i++) {
17871 struct tg3_napi *tnapi = &tp->napi[i];
17872
17873 tnapi->tp = tp;
17874 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17875
17876 tnapi->int_mbox = intmbx;
17877 if (i <= 4)
17878 intmbx += 0x8;
17879 else
17880 intmbx += 0x4;
17881
17882 tnapi->consmbox = rcvmbx;
17883 tnapi->prodmbox = sndmbx;
17884
17885 if (i)
17886 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17887 else
17888 tnapi->coal_now = HOSTCC_MODE_NOW;
17889
17890 if (!tg3_flag(tp, SUPPORT_MSIX))
17891 break;
17892
17893 /*
17894 * If we support MSIX, we'll be using RSS. If we're using
17895 * RSS, the first vector only handles link interrupts and the
17896 * remaining vectors handle rx and tx interrupts. Reuse the
17897 * mailbox values for the next iteration. The values we set up
17898 * above are still useful for the single vectored mode.
17899 */
17900 if (!i)
17901 continue;
17902
17903 rcvmbx += 0x8;
17904
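/* Assuming 64-bit send mailboxes with the low half at +0x4, the
 * -0x4/+0xc walk below visits the low half and then the high half of
 * each register before stepping to the next (e.g. 0x304, 0x300,
 * 0x30c, 0x308, ...).
 */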
17905 if (sndmbx & 0x4)
17906 sndmbx -= 0x4;
17907 else
17908 sndmbx += 0xc;
17909 }
17910
17911 /*
17912 * Reset chip in case UNDI or EFI driver did not shut it down.
17913 * The DMA self test will enable WDMAC and we'll see (spurious)
17914 * pending DMA on the PCI bus at that point.
17915 */
17916 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17917 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17918 tg3_full_lock(tp, 0);
17919 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17920 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17921 tg3_full_unlock(tp);
17922 }
17923
17924 err = tg3_test_dma(tp);
17925 if (err) {
17926 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17927 goto err_out_apeunmap;
17928 }
17929
17930 tg3_init_coal(tp);
17931
17932 pci_set_drvdata(pdev, dev);
17933
17934 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17935 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17936 tg3_asic_rev(tp) == ASIC_REV_5762)
17937 tg3_flag_set(tp, PTP_CAPABLE);
17938
17939 tg3_timer_init(tp);
17940
17941 tg3_carrier_off(tp);
17942
17943 err = register_netdev(dev);
17944 if (err) {
17945 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17946 goto err_out_apeunmap;
17947 }
17948
17949 if (tg3_flag(tp, PTP_CAPABLE)) {
17950 tg3_ptp_init(tp);
17951 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17952 &tp->pdev->dev);
17953 if (IS_ERR(tp->ptp_clock))
17954 tp->ptp_clock = NULL;
17955 }
17956
17957 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17958 tp->board_part_number,
17959 tg3_chip_rev_id(tp),
17960 tg3_bus_string(tp, str),
17961 dev->dev_addr);
17962
17963 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17964 struct phy_device *phydev;
17965 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17966 netdev_info(dev,
17967 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17968 phydev->drv->name, dev_name(&phydev->dev));
17969 } else {
17970 char *ethtype;
17971
17972 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17973 ethtype = "10/100Base-TX";
17974 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17975 ethtype = "1000Base-SX";
17976 else
17977 ethtype = "10/100/1000Base-T";
17978
17979 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17980 "(WireSpeed[%d], EEE[%d])\n",
17981 tg3_phy_string(tp), ethtype,
17982 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17983 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17984 }
17985
17986 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17987 (dev->features & NETIF_F_RXCSUM) != 0,
17988 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17989 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17990 tg3_flag(tp, ENABLE_ASF) != 0,
17991 tg3_flag(tp, TSO_CAPABLE) != 0);
17992 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17993 tp->dma_rwctrl,
17994 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17995 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17996
17997 pci_save_state(pdev);
17998
17999 return 0;
18000
18001 err_out_apeunmap:
18002 if (tp->aperegs) {
18003 iounmap(tp->aperegs);
18004 tp->aperegs = NULL;
18005 }
18006
18007 err_out_iounmap:
18008 if (tp->regs) {
18009 iounmap(tp->regs);
18010 tp->regs = NULL;
18011 }
18012
18013 err_out_free_dev:
18014 free_netdev(dev);
18015
18016 err_out_free_res:
18017 pci_release_regions(pdev);
18018
18019 err_out_disable_pdev:
18020 if (pci_is_enabled(pdev))
18021 pci_disable_device(pdev);
18022 return err;
18023 }
18024
18025 static void tg3_remove_one(struct pci_dev *pdev)
18026 {
18027 struct net_device *dev = pci_get_drvdata(pdev);
18028
18029 if (dev) {
18030 struct tg3 *tp = netdev_priv(dev);
18031
18032 tg3_ptp_fini(tp);
18033
18034 release_firmware(tp->fw);
18035
18036 tg3_reset_task_cancel(tp);
18037
18038 if (tg3_flag(tp, USE_PHYLIB)) {
18039 tg3_phy_fini(tp);
18040 tg3_mdio_fini(tp);
18041 }
18042
18043 unregister_netdev(dev);
18044 if (tp->aperegs) {
18045 iounmap(tp->aperegs);
18046 tp->aperegs = NULL;
18047 }
18048 if (tp->regs) {
18049 iounmap(tp->regs);
18050 tp->regs = NULL;
18051 }
18052 free_netdev(dev);
18053 pci_release_regions(pdev);
18054 pci_disable_device(pdev);
18055 }
18056 }
18057
18058 #ifdef CONFIG_PM_SLEEP
18059 static int tg3_suspend(struct device *device)
18060 {
18061 struct pci_dev *pdev = to_pci_dev(device);
18062 struct net_device *dev = pci_get_drvdata(pdev);
18063 struct tg3 *tp = netdev_priv(dev);
18064 int err = 0;
18065
18066 rtnl_lock();
18067
18068 if (!netif_running(dev))
18069 goto unlock;
18070
18071 tg3_reset_task_cancel(tp);
18072 tg3_phy_stop(tp);
18073 tg3_netif_stop(tp);
18074
18075 tg3_timer_stop(tp);
18076
18077 tg3_full_lock(tp, 1);
18078 tg3_disable_ints(tp);
18079 tg3_full_unlock(tp);
18080
18081 netif_device_detach(dev);
18082
18083 tg3_full_lock(tp, 0);
18084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18085 tg3_flag_clear(tp, INIT_COMPLETE);
18086 tg3_full_unlock(tp);
18087
18088 err = tg3_power_down_prepare(tp);
18089 if (err) {
18090 int err2;
18091
18092 tg3_full_lock(tp, 0);
18093
18094 tg3_flag_set(tp, INIT_COMPLETE);
18095 err2 = tg3_restart_hw(tp, true);
18096 if (err2)
18097 goto out;
18098
18099 tg3_timer_start(tp);
18100
18101 netif_device_attach(dev);
18102 tg3_netif_start(tp);
18103
18104 out:
18105 tg3_full_unlock(tp);
18106
18107 if (!err2)
18108 tg3_phy_start(tp);
18109 }
18110
18111 unlock:
18112 rtnl_unlock();
18113 return err;
18114 }
18115
18116 static int tg3_resume(struct device *device)
18117 {
18118 struct pci_dev *pdev = to_pci_dev(device);
18119 struct net_device *dev = pci_get_drvdata(pdev);
18120 struct tg3 *tp = netdev_priv(dev);
18121 int err = 0;
18122
18123 rtnl_lock();
18124
18125 if (!netif_running(dev))
18126 goto unlock;
18127
18128 netif_device_attach(dev);
18129
18130 tg3_full_lock(tp, 0);
18131
18132 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18133
18134 tg3_flag_set(tp, INIT_COMPLETE);
18135 err = tg3_restart_hw(tp,
18136 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18137 if (err)
18138 goto out;
18139
18140 tg3_timer_start(tp);
18141
18142 tg3_netif_start(tp);
18143
18144 out:
18145 tg3_full_unlock(tp);
18146
18147 if (!err)
18148 tg3_phy_start(tp);
18149
18150 unlock:
18151 rtnl_unlock();
18152 return err;
18153 }
18154 #endif /* CONFIG_PM_SLEEP */
18155
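/* SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into the system
 * sleep callbacks; when CONFIG_PM_SLEEP is off it expands to an empty
 * ops table, which is why the callbacks above are compiled out.
 */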
18156 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18157
18158 static void tg3_shutdown(struct pci_dev *pdev)
18159 {
18160 struct net_device *dev = pci_get_drvdata(pdev);
18161 struct tg3 *tp = netdev_priv(dev);
18162
18163 rtnl_lock();
18164 netif_device_detach(dev);
18165
18166 if (netif_running(dev))
18167 dev_close(dev);
18168
18169 if (system_state == SYSTEM_POWER_OFF)
18170 tg3_power_down(tp);
18171
18172 rtnl_unlock();
18173 }
18174
18175 /**
18176 * tg3_io_error_detected - called when PCI error is detected
18177 * @pdev: Pointer to PCI device
18178 * @state: The current pci connection state
18179 *
18180 * This function is called after a PCI bus error affecting
18181 * this device has been detected.
18182 */
18183 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18184 pci_channel_state_t state)
18185 {
18186 struct net_device *netdev = pci_get_drvdata(pdev);
18187 struct tg3 *tp = netdev_priv(netdev);
18188 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18189
18190 netdev_info(netdev, "PCI I/O error detected\n");
18191
18192 rtnl_lock();
18193
18194 /* Could be second call or maybe we don't have netdev yet */
18195 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18196 goto done;
18197
18198 /* No need to recover from a permanent error; only a frozen state starts recovery */
18199 if (state == pci_channel_io_frozen)
18200 tp->pcierr_recovery = true;
18201
18202 tg3_phy_stop(tp);
18203
18204 tg3_netif_stop(tp);
18205
18206 tg3_timer_stop(tp);
18207
18208 /* Want to make sure that the reset task doesn't run */
18209 tg3_reset_task_cancel(tp);
18210
18211 netif_device_detach(netdev);
18212
18213 /* Clean up software state, even if MMIO is blocked */
18214 tg3_full_lock(tp, 0);
18215 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18216 tg3_full_unlock(tp);
18217
18218 done:
18219 if (state == pci_channel_io_perm_failure) {
18220 if (netdev) {
18221 tg3_napi_enable(tp);
18222 dev_close(netdev);
18223 }
18224 err = PCI_ERS_RESULT_DISCONNECT;
18225 } else {
18226 pci_disable_device(pdev);
18227 }
18228
18229 rtnl_unlock();
18230
18231 return err;
18232 }
18233
18234 /**
18235 * tg3_io_slot_reset - called after the pci bus has been reset.
18236 * @pdev: Pointer to PCI device
18237 *
18238 * Restart the card from scratch, as if from a cold-boot.
18239 * At this point, the card has experienced a hard reset,
18240 * followed by fixups by BIOS, and has its config space
18241 * set up identically to what it was at cold boot.
18242 */
18243 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18244 {
18245 struct net_device *netdev = pci_get_drvdata(pdev);
18246 struct tg3 *tp = netdev_priv(netdev);
18247 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18248 int err;
18249
18250 rtnl_lock();
18251
18252 if (pci_enable_device(pdev)) {
18253 dev_err(&pdev->dev,
18254 "Cannot re-enable PCI device after reset.\n");
18255 goto done;
18256 }
18257
18258 pci_set_master(pdev);
18259 pci_restore_state(pdev);
18260 pci_save_state(pdev);
18261
18262 if (!netdev || !netif_running(netdev)) {
18263 rc = PCI_ERS_RESULT_RECOVERED;
18264 goto done;
18265 }
18266
18267 err = tg3_power_up(tp);
18268 if (err)
18269 goto done;
18270
18271 rc = PCI_ERS_RESULT_RECOVERED;
18272
18273 done:
18274 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18275 tg3_napi_enable(tp);
18276 dev_close(netdev);
18277 }
18278 rtnl_unlock();
18279
18280 return rc;
18281 }
18282
18283 /**
18284 * tg3_io_resume - called when traffic can start flowing again.
18285 * @pdev: Pointer to PCI device
18286 *
18287 * This callback is called when the error recovery driver tells
18288 * us that it's OK to resume normal operation.
18289 */
18290 static void tg3_io_resume(struct pci_dev *pdev)
18291 {
18292 struct net_device *netdev = pci_get_drvdata(pdev);
18293 struct tg3 *tp = netdev_priv(netdev);
18294 int err;
18295
18296 rtnl_lock();
18297
18298 if (!netdev || !netif_running(netdev))
18299 goto done;
18300
18301 tg3_full_lock(tp, 0);
18302 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18303 tg3_flag_set(tp, INIT_COMPLETE);
18304 err = tg3_restart_hw(tp, true);
18305 if (err) {
18306 tg3_full_unlock(tp);
18307 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18308 goto done;
18309 }
18310
18311 netif_device_attach(netdev);
18312
18313 tg3_timer_start(tp);
18314
18315 tg3_netif_start(tp);
18316
18317 tg3_full_unlock(tp);
18318
18319 tg3_phy_start(tp);
18320
18321 done:
18322 tp->pcierr_recovery = false;
18323 rtnl_unlock();
18324 }
18325
18326 static const struct pci_error_handlers tg3_err_handler = {
18327 .error_detected = tg3_io_error_detected,
18328 .slot_reset = tg3_io_slot_reset,
18329 .resume = tg3_io_resume
18330 };
18331
18332 static struct pci_driver tg3_driver = {
18333 .name = DRV_MODULE_NAME,
18334 .id_table = tg3_pci_tbl,
18335 .probe = tg3_init_one,
18336 .remove = tg3_remove_one,
18337 .err_handler = &tg3_err_handler,
18338 .driver.pm = &tg3_pm_ops,
18339 .shutdown = tg3_shutdown,
18340 };
18341
18342 module_pci_driver(tg3_driver);
18343