1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/ip.h>
61 
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65 
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68 
69 #define BAR_0	0
70 #define BAR_2	2
71 
72 #include "tg3.h"
73 
74 /* Functions & macros to verify TG3_FLAGS types */
75 
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78 	return test_bit(flag, bits);
79 }
80 
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 	set_bit(flag, bits);
84 }
85 
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 	clear_bit(flag, bits);
89 }
90 
91 #define tg3_flag(tp, flag)				\
92 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag)				\
94 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag)			\
96 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
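/* Illustrative expansion (not part of the original source): a call such as
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * pastes the flag name onto the TG3_FLAG_ prefix and becomes
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the driver's flag bitmap.
 */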
97 
98 #define DRV_MODULE_NAME		"tg3"
99 /* DO NOT UPDATE TG3_*_NUM defines */
100 #define TG3_MAJ_NUM			3
101 #define TG3_MIN_NUM			137
102 
103 #define RESET_KIND_SHUTDOWN	0
104 #define RESET_KIND_INIT		1
105 #define RESET_KIND_SUSPEND	2
106 
107 #define TG3_DEF_RX_MODE		0
108 #define TG3_DEF_TX_MODE		0
109 #define TG3_DEF_MSG_ENABLE	  \
110 	(NETIF_MSG_DRV		| \
111 	 NETIF_MSG_PROBE	| \
112 	 NETIF_MSG_LINK		| \
113 	 NETIF_MSG_TIMER	| \
114 	 NETIF_MSG_IFDOWN	| \
115 	 NETIF_MSG_IFUP		| \
116 	 NETIF_MSG_RX_ERR	| \
117 	 NETIF_MSG_TX_ERR)
118 
119 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
120 
121 /* length of time before we decide the hardware is borked,
122  * and dev->tx_timeout() should be called to fix the problem.
123  */
124 
125 #define TG3_TX_TIMEOUT			(5 * HZ)
126 
127 /* hardware minimum and maximum for a single frame's data payload */
128 #define TG3_MIN_MTU			ETH_ZLEN
129 #define TG3_MAX_MTU(tp)	\
130 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 
132 /* These numbers seem to be hard-coded in the NIC firmware somehow.
133  * You can't change the ring sizes, but you can change where you place
134  * them in the NIC onboard memory.
135  */
136 #define TG3_RX_STD_RING_SIZE(tp) \
137 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
138 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
139 #define TG3_DEF_RX_RING_PENDING		200
140 #define TG3_RX_JMB_RING_SIZE(tp) \
141 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
142 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
143 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
144 
145 /* Do not place this n-ring entries value into the tp struct itself;
146  * we really want to expose these constants to GCC so that modulo et
147  * al.  operations are done with shifts and masks instead of with
148  * hw multiply/modulo instructions.  Another solution would be to
149  * replace things like '% foo' with '& (foo - 1)'.
150  */
151 
152 #define TG3_TX_RING_SIZE		512
153 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
154 
155 #define TG3_RX_STD_RING_BYTES(tp) \
156 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
157 #define TG3_RX_JMB_RING_BYTES(tp) \
158 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
159 #define TG3_RX_RCB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
161 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
162 				 TG3_TX_RING_SIZE)
163 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
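/* Worked example of the shift/mask point above: TG3_TX_RING_SIZE is a
 * compile-time power of two (512), so NEXT_TX(N) computes (N + 1) % 512
 * with a single AND against 511 instead of a hardware modulo.
 */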
164 
165 #define TG3_DMA_BYTE_ENAB		64
166 
167 #define TG3_RX_STD_DMA_SZ		1536
168 #define TG3_RX_JMB_DMA_SZ		9046
169 
170 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
171 
172 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
173 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 
175 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
176 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 
178 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
179 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 
181 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
182  * that are at least dword aligned when used in PCIX mode.  The driver
183  * works around this bug by double copying the packet.  This workaround
184  * is built into the normal double copy length check for efficiency.
185  *
186  * However, the double copy is only necessary on those architectures
187  * where unaligned memory accesses are inefficient.  For those architectures
188  * where unaligned memory accesses incur little penalty, we can reintegrate
189  * the 5701 in the normal rx path.  Doing so saves a device structure
190  * dereference by hardcoding the double copy threshold in place.
191  */
192 #define TG3_RX_COPY_THRESHOLD		256
193 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
194 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
195 #else
196 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
197 #endif
198 
199 #if (NET_IP_ALIGN != 0)
200 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
201 #else
202 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
203 #endif
204 
205 /* minimum number of free TX descriptors required to wake up TX process */
206 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
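/* Illustrative arithmetic: with the default tx_pending of
 * TG3_DEF_TX_RING_PENDING (511), the queue is woken once
 * 511 / 4 = 127 descriptors are free.
 */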
207 #define TG3_TX_BD_DMA_MAX_2K		2048
208 #define TG3_TX_BD_DMA_MAX_4K		4096
209 
210 #define TG3_RAW_IP_ALIGN 2
211 
212 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
213 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
214 
215 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
216 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
217 
218 #define FIRMWARE_TG3		"tigon/tg3.bin"
219 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
220 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
221 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
222 
223 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
224 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
225 MODULE_LICENSE("GPL");
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG357766);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 
231 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
232 module_param(tg3_debug, int, 0);
233 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 
235 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
236 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
237 
238 static const struct pci_device_id tg3_pci_tbl[] = {
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
258 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259 			TG3_DRV_DATA_FLAG_5705_10_100},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
261 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
262 			TG3_DRV_DATA_FLAG_5705_10_100},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
265 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
266 			TG3_DRV_DATA_FLAG_5705_10_100},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
273 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
279 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
287 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
288 			PCI_VENDOR_ID_LENOVO,
289 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
290 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
293 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
312 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
313 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
314 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
315 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
316 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
317 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
321 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
331 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
333 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
353 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
354 	{}
355 };
356 
357 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
358 
359 static const struct {
360 	const char string[ETH_GSTRING_LEN];
361 } ethtool_stats_keys[] = {
362 	{ "rx_octets" },
363 	{ "rx_fragments" },
364 	{ "rx_ucast_packets" },
365 	{ "rx_mcast_packets" },
366 	{ "rx_bcast_packets" },
367 	{ "rx_fcs_errors" },
368 	{ "rx_align_errors" },
369 	{ "rx_xon_pause_rcvd" },
370 	{ "rx_xoff_pause_rcvd" },
371 	{ "rx_mac_ctrl_rcvd" },
372 	{ "rx_xoff_entered" },
373 	{ "rx_frame_too_long_errors" },
374 	{ "rx_jabbers" },
375 	{ "rx_undersize_packets" },
376 	{ "rx_in_length_errors" },
377 	{ "rx_out_length_errors" },
378 	{ "rx_64_or_less_octet_packets" },
379 	{ "rx_65_to_127_octet_packets" },
380 	{ "rx_128_to_255_octet_packets" },
381 	{ "rx_256_to_511_octet_packets" },
382 	{ "rx_512_to_1023_octet_packets" },
383 	{ "rx_1024_to_1522_octet_packets" },
384 	{ "rx_1523_to_2047_octet_packets" },
385 	{ "rx_2048_to_4095_octet_packets" },
386 	{ "rx_4096_to_8191_octet_packets" },
387 	{ "rx_8192_to_9022_octet_packets" },
388 
389 	{ "tx_octets" },
390 	{ "tx_collisions" },
391 
392 	{ "tx_xon_sent" },
393 	{ "tx_xoff_sent" },
394 	{ "tx_flow_control" },
395 	{ "tx_mac_errors" },
396 	{ "tx_single_collisions" },
397 	{ "tx_mult_collisions" },
398 	{ "tx_deferred" },
399 	{ "tx_excessive_collisions" },
400 	{ "tx_late_collisions" },
401 	{ "tx_collide_2times" },
402 	{ "tx_collide_3times" },
403 	{ "tx_collide_4times" },
404 	{ "tx_collide_5times" },
405 	{ "tx_collide_6times" },
406 	{ "tx_collide_7times" },
407 	{ "tx_collide_8times" },
408 	{ "tx_collide_9times" },
409 	{ "tx_collide_10times" },
410 	{ "tx_collide_11times" },
411 	{ "tx_collide_12times" },
412 	{ "tx_collide_13times" },
413 	{ "tx_collide_14times" },
414 	{ "tx_collide_15times" },
415 	{ "tx_ucast_packets" },
416 	{ "tx_mcast_packets" },
417 	{ "tx_bcast_packets" },
418 	{ "tx_carrier_sense_errors" },
419 	{ "tx_discards" },
420 	{ "tx_errors" },
421 
422 	{ "dma_writeq_full" },
423 	{ "dma_write_prioq_full" },
424 	{ "rxbds_empty" },
425 	{ "rx_discards" },
426 	{ "rx_errors" },
427 	{ "rx_threshold_hit" },
428 
429 	{ "dma_readq_full" },
430 	{ "dma_read_prioq_full" },
431 	{ "tx_comp_queue_full" },
432 
433 	{ "ring_set_send_prod_index" },
434 	{ "ring_status_update" },
435 	{ "nic_irqs" },
436 	{ "nic_avoided_irqs" },
437 	{ "nic_tx_threshold_hit" },
438 
439 	{ "mbuf_lwm_thresh_hit" },
440 };
441 
442 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
443 #define TG3_NVRAM_TEST		0
444 #define TG3_LINK_TEST		1
445 #define TG3_REGISTER_TEST	2
446 #define TG3_MEMORY_TEST		3
447 #define TG3_MAC_LOOPB_TEST	4
448 #define TG3_PHY_LOOPB_TEST	5
449 #define TG3_EXT_LOOPB_TEST	6
450 #define TG3_INTERRUPT_TEST	7
451 
452 
453 static const struct {
454 	const char string[ETH_GSTRING_LEN];
455 } ethtool_test_keys[] = {
456 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
457 	[TG3_LINK_TEST]		= { "link test         (online) " },
458 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
459 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
460 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
461 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
462 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
463 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
464 };
465 
466 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
467 
468 
469 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
470 {
471 	writel(val, tp->regs + off);
472 }
473 
474 static u32 tg3_read32(struct tg3 *tp, u32 off)
475 {
476 	return readl(tp->regs + off);
477 }
478 
479 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
480 {
481 	writel(val, tp->aperegs + off);
482 }
483 
484 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
485 {
486 	return readl(tp->aperegs + off);
487 }
488 
489 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
490 {
491 	unsigned long flags;
492 
493 	spin_lock_irqsave(&tp->indirect_lock, flags);
494 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
495 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
496 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
497 }
498 
499 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
500 {
501 	writel(val, tp->regs + off);
502 	readl(tp->regs + off);
503 }
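/* The readl() above immediately follows the writel() so the posted PCI
 * write reaches the device before the caller proceeds; the same
 * flush-by-readback idiom is used by _tw32_flush() below.
 */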
504 
505 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
506 {
507 	unsigned long flags;
508 	u32 val;
509 
510 	spin_lock_irqsave(&tp->indirect_lock, flags);
511 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
512 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
513 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
514 	return val;
515 }
516 
517 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
518 {
519 	unsigned long flags;
520 
521 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
522 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
523 				       TG3_64BIT_REG_LOW, val);
524 		return;
525 	}
526 	if (off == TG3_RX_STD_PROD_IDX_REG) {
527 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
528 				       TG3_64BIT_REG_LOW, val);
529 		return;
530 	}
531 
532 	spin_lock_irqsave(&tp->indirect_lock, flags);
533 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
534 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
535 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
536 
537 	/* In indirect mode when disabling interrupts, we also need
538 	 * to clear the interrupt bit in the GRC local ctrl register.
539 	 */
540 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
541 	    (val == 0x1)) {
542 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
543 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
544 	}
545 }
546 
547 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
548 {
549 	unsigned long flags;
550 	u32 val;
551 
552 	spin_lock_irqsave(&tp->indirect_lock, flags);
553 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
554 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
555 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
556 	return val;
557 }
558 
559 /* usec_wait specifies the wait time in usec when writing to certain registers
560  * where it is unsafe to read back the register without some delay.
560  * GRC_LOCAL_CTRL is one example, e.g. when GPIOs are toggled to switch power.
561  * TG3PCI_CLOCK_CTRL is another, e.g. when the clock frequencies are changed.
563  */
564 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
565 {
566 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
567 		/* Non-posted methods */
568 		tp->write32(tp, off, val);
569 	else {
570 		/* Posted method */
571 		tg3_write32(tp, off, val);
572 		if (usec_wait)
573 			udelay(usec_wait);
574 		tp->read32(tp, off);
575 	}
576 	/* Wait again after the read for the posted method to guarantee that
577 	 * the wait time is met.
578 	 */
579 	if (usec_wait)
580 		udelay(usec_wait);
581 }
582 
583 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
584 {
585 	tp->write32_mbox(tp, off, val);
586 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
587 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
588 	     !tg3_flag(tp, ICH_WORKAROUND)))
589 		tp->read32_mbox(tp, off);
590 }
591 
592 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
593 {
594 	void __iomem *mbox = tp->regs + off;
595 	writel(val, mbox);
596 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
597 		writel(val, mbox);
598 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
599 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
600 		readl(mbox);
601 }
602 
603 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
604 {
605 	return readl(tp->regs + off + GRCMBOX_BASE);
606 }
607 
608 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
609 {
610 	writel(val, tp->regs + off + GRCMBOX_BASE);
611 }
612 
613 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
614 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
615 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
616 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
617 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
618 
619 #define tw32(reg, val)			tp->write32(tp, reg, val)
620 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
621 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
622 #define tr32(reg)			tp->read32(tp, reg)
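/* Usage sketch (illustrative): tw32_f() flushes the posted write
 * immediately, while tw32_wait_f() also enforces a settle time, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * writes the register, waits 40 usec, reads it back, and waits another
 * 40 usec, per the comment above _tw32_flush().
 */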
623 
624 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
625 {
626 	unsigned long flags;
627 
628 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
629 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
630 		return;
631 
632 	spin_lock_irqsave(&tp->indirect_lock, flags);
633 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
634 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
635 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
636 
637 		/* Always leave this as zero. */
638 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 	} else {
640 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
641 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
642 
643 		/* Always leave this as zero. */
644 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
645 	}
646 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
647 }
648 
649 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
650 {
651 	unsigned long flags;
652 
653 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
654 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
655 		*val = 0;
656 		return;
657 	}
658 
659 	spin_lock_irqsave(&tp->indirect_lock, flags);
660 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
661 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
662 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
663 
664 		/* Always leave this as zero. */
665 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 	} else {
667 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
668 		*val = tr32(TG3PCI_MEM_WIN_DATA);
669 
670 		/* Always leave this as zero. */
671 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
672 	}
673 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
674 }
675 
676 static void tg3_ape_lock_init(struct tg3 *tp)
677 {
678 	int i;
679 	u32 regbase, bit;
680 
681 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
682 		regbase = TG3_APE_LOCK_GRANT;
683 	else
684 		regbase = TG3_APE_PER_LOCK_GRANT;
685 
686 	/* Make sure the driver doesn't hold any stale locks. */
687 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
688 		switch (i) {
689 		case TG3_APE_LOCK_PHY0:
690 		case TG3_APE_LOCK_PHY1:
691 		case TG3_APE_LOCK_PHY2:
692 		case TG3_APE_LOCK_PHY3:
693 			bit = APE_LOCK_GRANT_DRIVER;
694 			break;
695 		default:
696 			if (!tp->pci_fn)
697 				bit = APE_LOCK_GRANT_DRIVER;
698 			else
699 				bit = 1 << tp->pci_fn;
700 		}
701 		tg3_ape_write32(tp, regbase + 4 * i, bit);
702 	}
703 
704 }
705 
706 static int tg3_ape_lock(struct tg3 *tp, int locknum)
707 {
708 	int i, off;
709 	int ret = 0;
710 	u32 status, req, gnt, bit;
711 
712 	if (!tg3_flag(tp, ENABLE_APE))
713 		return 0;
714 
715 	switch (locknum) {
716 	case TG3_APE_LOCK_GPIO:
717 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
718 			return 0;
719 		fallthrough;
720 	case TG3_APE_LOCK_GRC:
721 	case TG3_APE_LOCK_MEM:
722 		if (!tp->pci_fn)
723 			bit = APE_LOCK_REQ_DRIVER;
724 		else
725 			bit = 1 << tp->pci_fn;
726 		break;
727 	case TG3_APE_LOCK_PHY0:
728 	case TG3_APE_LOCK_PHY1:
729 	case TG3_APE_LOCK_PHY2:
730 	case TG3_APE_LOCK_PHY3:
731 		bit = APE_LOCK_REQ_DRIVER;
732 		break;
733 	default:
734 		return -EINVAL;
735 	}
736 
737 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
738 		req = TG3_APE_LOCK_REQ;
739 		gnt = TG3_APE_LOCK_GRANT;
740 	} else {
741 		req = TG3_APE_PER_LOCK_REQ;
742 		gnt = TG3_APE_PER_LOCK_GRANT;
743 	}
744 
745 	off = 4 * locknum;
746 
747 	tg3_ape_write32(tp, req + off, bit);
748 
749 	/* Wait for up to 1 millisecond to acquire lock. */
750 	for (i = 0; i < 100; i++) {
751 		status = tg3_ape_read32(tp, gnt + off);
752 		if (status == bit)
753 			break;
754 		if (pci_channel_offline(tp->pdev))
755 			break;
756 
757 		udelay(10);
758 	}
759 
760 	if (status != bit) {
761 		/* Revoke the lock request. */
762 		tg3_ape_write32(tp, gnt + off, bit);
763 		ret = -EBUSY;
764 	}
765 
766 	return ret;
767 }
768 
769 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
770 {
771 	u32 gnt, bit;
772 
773 	if (!tg3_flag(tp, ENABLE_APE))
774 		return;
775 
776 	switch (locknum) {
777 	case TG3_APE_LOCK_GPIO:
778 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
779 			return;
780 		fallthrough;
781 	case TG3_APE_LOCK_GRC:
782 	case TG3_APE_LOCK_MEM:
783 		if (!tp->pci_fn)
784 			bit = APE_LOCK_GRANT_DRIVER;
785 		else
786 			bit = 1 << tp->pci_fn;
787 		break;
788 	case TG3_APE_LOCK_PHY0:
789 	case TG3_APE_LOCK_PHY1:
790 	case TG3_APE_LOCK_PHY2:
791 	case TG3_APE_LOCK_PHY3:
792 		bit = APE_LOCK_GRANT_DRIVER;
793 		break;
794 	default:
795 		return;
796 	}
797 
798 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
799 		gnt = TG3_APE_LOCK_GRANT;
800 	else
801 		gnt = TG3_APE_PER_LOCK_GRANT;
802 
803 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
804 }
805 
806 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
807 {
808 	u32 apedata;
809 
810 	while (timeout_us) {
811 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
812 			return -EBUSY;
813 
814 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
815 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
816 			break;
817 
818 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
819 
820 		udelay(10);
821 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
822 	}
823 
824 	return timeout_us ? 0 : -EBUSY;
825 }
826 
827 #ifdef CONFIG_TIGON3_HWMON
828 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
829 {
830 	u32 i, apedata;
831 
832 	for (i = 0; i < timeout_us / 10; i++) {
833 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
834 
835 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
836 			break;
837 
838 		udelay(10);
839 	}
840 
841 	return i == timeout_us / 10;
842 }
843 
844 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
845 				   u32 len)
846 {
847 	int err;
848 	u32 i, bufoff, msgoff, maxlen, apedata;
849 
850 	if (!tg3_flag(tp, APE_HAS_NCSI))
851 		return 0;
852 
853 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
854 	if (apedata != APE_SEG_SIG_MAGIC)
855 		return -ENODEV;
856 
857 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
858 	if (!(apedata & APE_FW_STATUS_READY))
859 		return -EAGAIN;
860 
861 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
862 		 TG3_APE_SHMEM_BASE;
863 	msgoff = bufoff + 2 * sizeof(u32);
864 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
865 
866 	while (len) {
867 		u32 length;
868 
869 		/* Cap xfer sizes to scratchpad limits. */
870 		length = (len > maxlen) ? maxlen : len;
871 		len -= length;
872 
873 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
874 		if (!(apedata & APE_FW_STATUS_READY))
875 			return -EAGAIN;
876 
877 		/* Wait for up to 1 msec for APE to service previous event. */
878 		err = tg3_ape_event_lock(tp, 1000);
879 		if (err)
880 			return err;
881 
882 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
883 			  APE_EVENT_STATUS_SCRTCHPD_READ |
884 			  APE_EVENT_STATUS_EVENT_PENDING;
885 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
886 
887 		tg3_ape_write32(tp, bufoff, base_off);
888 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
889 
890 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
891 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
892 
893 		base_off += length;
894 
895 		if (tg3_ape_wait_for_event(tp, 30000))
896 			return -EAGAIN;
897 
898 		for (i = 0; length; i += 4, length -= 4) {
899 			u32 val = tg3_ape_read32(tp, msgoff + i);
900 			memcpy(data, &val, sizeof(u32));
901 			data++;
902 		}
903 	}
904 
905 	return 0;
906 }
907 #endif
908 
909 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
910 {
911 	int err;
912 	u32 apedata;
913 
914 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
915 	if (apedata != APE_SEG_SIG_MAGIC)
916 		return -EAGAIN;
917 
918 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
919 	if (!(apedata & APE_FW_STATUS_READY))
920 		return -EAGAIN;
921 
922 	/* Wait for up to 20 milliseconds for the APE to service the previous event. */
923 	err = tg3_ape_event_lock(tp, 20000);
924 	if (err)
925 		return err;
926 
927 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
928 			event | APE_EVENT_STATUS_EVENT_PENDING);
929 
930 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
931 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
932 
933 	return 0;
934 }
935 
936 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
937 {
938 	u32 event;
939 	u32 apedata;
940 
941 	if (!tg3_flag(tp, ENABLE_APE))
942 		return;
943 
944 	switch (kind) {
945 	case RESET_KIND_INIT:
946 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
947 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
948 				APE_HOST_SEG_SIG_MAGIC);
949 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
950 				APE_HOST_SEG_LEN_MAGIC);
951 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
952 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
953 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
954 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
955 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
956 				APE_HOST_BEHAV_NO_PHYLOCK);
957 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
958 				    TG3_APE_HOST_DRVR_STATE_START);
959 
960 		event = APE_EVENT_STATUS_STATE_START;
961 		break;
962 	case RESET_KIND_SHUTDOWN:
963 		if (device_may_wakeup(&tp->pdev->dev) &&
964 		    tg3_flag(tp, WOL_ENABLE)) {
965 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
966 					    TG3_APE_HOST_WOL_SPEED_AUTO);
967 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
968 		} else
969 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
970 
971 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
972 
973 		event = APE_EVENT_STATUS_STATE_UNLOAD;
974 		break;
975 	default:
976 		return;
977 	}
978 
979 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
980 
981 	tg3_ape_send_event(tp, event);
982 }
983 
984 static void tg3_send_ape_heartbeat(struct tg3 *tp,
985 				   unsigned long interval)
986 {
987 	/* Send a heartbeat only if the interval has elapsed. */
988 	if (!tg3_flag(tp, ENABLE_APE) ||
989 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
990 		return;
991 
992 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
993 	tp->ape_hb_jiffies = jiffies;
994 }
995 
996 static void tg3_disable_ints(struct tg3 *tp)
997 {
998 	int i;
999 
1000 	tw32(TG3PCI_MISC_HOST_CTRL,
1001 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1002 	for (i = 0; i < tp->irq_max; i++)
1003 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1004 }
1005 
1006 static void tg3_enable_ints(struct tg3 *tp)
1007 {
1008 	int i;
1009 
1010 	tp->irq_sync = 0;
1011 	wmb();
1012 
1013 	tw32(TG3PCI_MISC_HOST_CTRL,
1014 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1015 
1016 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1017 	for (i = 0; i < tp->irq_cnt; i++) {
1018 		struct tg3_napi *tnapi = &tp->napi[i];
1019 
1020 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1021 		if (tg3_flag(tp, 1SHOT_MSI))
1022 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 
1024 		tp->coal_now |= tnapi->coal_now;
1025 	}
1026 
1027 	/* Force an initial interrupt */
1028 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1029 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1030 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1031 	else
1032 		tw32(HOSTCC_MODE, tp->coal_now);
1033 
1034 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1035 }
1036 
1037 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1038 {
1039 	struct tg3 *tp = tnapi->tp;
1040 	struct tg3_hw_status *sblk = tnapi->hw_status;
1041 	unsigned int work_exists = 0;
1042 
1043 	/* check for phy events */
1044 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1045 		if (sblk->status & SD_STATUS_LINK_CHG)
1046 			work_exists = 1;
1047 	}
1048 
1049 	/* check for TX work to do */
1050 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1051 		work_exists = 1;
1052 
1053 	/* check for RX work to do */
1054 	if (tnapi->rx_rcb_prod_idx &&
1055 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1056 		work_exists = 1;
1057 
1058 	return work_exists;
1059 }
1060 
1061 /* tg3_int_reenable
1062  *  similar to tg3_enable_ints, but it accurately determines whether there
1063  *  is new work pending and can return without flushing the PIO write
1064  *  which reenables interrupts.
1065  */
1066 static void tg3_int_reenable(struct tg3_napi *tnapi)
1067 {
1068 	struct tg3 *tp = tnapi->tp;
1069 
1070 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1071 
1072 	/* When doing tagged status, this work check is unnecessary.
1073 	 * The last_tag we write above tells the chip which piece of
1074 	 * work we've completed.
1075 	 */
1076 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1077 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1078 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1079 }
1080 
1081 static void tg3_switch_clocks(struct tg3 *tp)
1082 {
1083 	u32 clock_ctrl;
1084 	u32 orig_clock_ctrl;
1085 
1086 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1087 		return;
1088 
1089 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1090 
1091 	orig_clock_ctrl = clock_ctrl;
1092 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1093 		       CLOCK_CTRL_CLKRUN_OENABLE |
1094 		       0x1f);
1095 	tp->pci_clock_ctrl = clock_ctrl;
1096 
1097 	if (tg3_flag(tp, 5705_PLUS)) {
1098 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1099 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1100 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1101 		}
1102 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1103 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1104 			    clock_ctrl |
1105 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1106 			    40);
1107 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1108 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1109 			    40);
1110 	}
1111 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1112 }
1113 
1114 #define PHY_BUSY_LOOPS	5000
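/* Each poll of MAC_MI_COM below waits 10 usec, so PHY_BUSY_LOOPS bounds a
 * single PHY access at roughly 5000 * 10 usec = 50 msec before -EBUSY.
 */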
1115 
1116 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1117 			 u32 *val)
1118 {
1119 	u32 frame_val;
1120 	unsigned int loops;
1121 	int ret;
1122 
1123 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1124 		tw32_f(MAC_MI_MODE,
1125 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1126 		udelay(80);
1127 	}
1128 
1129 	tg3_ape_lock(tp, tp->phy_ape_lock);
1130 
1131 	*val = 0x0;
1132 
1133 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1134 		      MI_COM_PHY_ADDR_MASK);
1135 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1136 		      MI_COM_REG_ADDR_MASK);
1137 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1138 
1139 	tw32_f(MAC_MI_COM, frame_val);
1140 
1141 	loops = PHY_BUSY_LOOPS;
1142 	while (loops != 0) {
1143 		udelay(10);
1144 		frame_val = tr32(MAC_MI_COM);
1145 
1146 		if ((frame_val & MI_COM_BUSY) == 0) {
1147 			udelay(5);
1148 			frame_val = tr32(MAC_MI_COM);
1149 			break;
1150 		}
1151 		loops -= 1;
1152 	}
1153 
1154 	ret = -EBUSY;
1155 	if (loops != 0) {
1156 		*val = frame_val & MI_COM_DATA_MASK;
1157 		ret = 0;
1158 	}
1159 
1160 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1161 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1162 		udelay(80);
1163 	}
1164 
1165 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1166 
1167 	return ret;
1168 }
1169 
1170 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1171 {
1172 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1173 }
1174 
1175 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1176 			  u32 val)
1177 {
1178 	u32 frame_val;
1179 	unsigned int loops;
1180 	int ret;
1181 
1182 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1183 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1184 		return 0;
1185 
1186 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1187 		tw32_f(MAC_MI_MODE,
1188 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1189 		udelay(80);
1190 	}
1191 
1192 	tg3_ape_lock(tp, tp->phy_ape_lock);
1193 
1194 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1195 		      MI_COM_PHY_ADDR_MASK);
1196 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1197 		      MI_COM_REG_ADDR_MASK);
1198 	frame_val |= (val & MI_COM_DATA_MASK);
1199 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1200 
1201 	tw32_f(MAC_MI_COM, frame_val);
1202 
1203 	loops = PHY_BUSY_LOOPS;
1204 	while (loops != 0) {
1205 		udelay(10);
1206 		frame_val = tr32(MAC_MI_COM);
1207 		if ((frame_val & MI_COM_BUSY) == 0) {
1208 			udelay(5);
1209 			frame_val = tr32(MAC_MI_COM);
1210 			break;
1211 		}
1212 		loops -= 1;
1213 	}
1214 
1215 	ret = -EBUSY;
1216 	if (loops != 0)
1217 		ret = 0;
1218 
1219 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1220 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1221 		udelay(80);
1222 	}
1223 
1224 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1225 
1226 	return ret;
1227 }
1228 
1229 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1230 {
1231 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1232 }
1233 
1234 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1235 {
1236 	int err;
1237 
1238 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1239 	if (err)
1240 		goto done;
1241 
1242 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1243 	if (err)
1244 		goto done;
1245 
1246 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1247 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1248 	if (err)
1249 		goto done;
1250 
1251 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1252 
1253 done:
1254 	return err;
1255 }
1256 
1257 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1258 {
1259 	int err;
1260 
1261 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1262 	if (err)
1263 		goto done;
1264 
1265 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1266 	if (err)
1267 		goto done;
1268 
1269 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1270 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1271 	if (err)
1272 		goto done;
1273 
1274 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1275 
1276 done:
1277 	return err;
1278 }
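/* The two Clause 45 helpers above use the usual indirect MMD sequence:
 * select the device in MII_TG3_MMD_CTRL, latch the register address via
 * MII_TG3_MMD_ADDRESS, switch the control register to no-post-increment
 * data mode, then move the data through MII_TG3_MMD_ADDRESS.
 */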
1279 
1280 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1281 {
1282 	int err;
1283 
1284 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1285 	if (!err)
1286 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1287 
1288 	return err;
1289 }
1290 
1291 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1292 {
1293 	int err;
1294 
1295 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1296 	if (!err)
1297 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1298 
1299 	return err;
1300 }
1301 
1302 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1303 {
1304 	int err;
1305 
1306 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1307 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1308 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1309 	if (!err)
1310 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1311 
1312 	return err;
1313 }
1314 
1315 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1316 {
1317 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1318 		set |= MII_TG3_AUXCTL_MISC_WREN;
1319 
1320 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1321 }
1322 
1323 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1324 {
1325 	u32 val;
1326 	int err;
1327 
1328 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1329 
1330 	if (err)
1331 		return err;
1332 
1333 	if (enable)
1334 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1335 	else
1336 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1337 
1338 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1339 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1340 
1341 	return err;
1342 }
1343 
1344 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1345 {
1346 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1347 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1348 }
1349 
1350 static int tg3_bmcr_reset(struct tg3 *tp)
1351 {
1352 	u32 phy_control;
1353 	int limit, err;
1354 
1355 	/* OK, reset it, and poll the BMCR_RESET bit until it
1356 	 * clears or we time out.
1357 	 */
1358 	phy_control = BMCR_RESET;
1359 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1360 	if (err != 0)
1361 		return -EBUSY;
1362 
1363 	limit = 5000;
1364 	while (limit--) {
1365 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1366 		if (err != 0)
1367 			return -EBUSY;
1368 
1369 		if ((phy_control & BMCR_RESET) == 0) {
1370 			udelay(40);
1371 			break;
1372 		}
1373 		udelay(10);
1374 	}
1375 	if (limit < 0)
1376 		return -EBUSY;
1377 
1378 	return 0;
1379 }
1380 
1381 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1382 {
1383 	struct tg3 *tp = bp->priv;
1384 	u32 val;
1385 
1386 	spin_lock_bh(&tp->lock);
1387 
1388 	if (__tg3_readphy(tp, mii_id, reg, &val))
1389 		val = -EIO;
1390 
1391 	spin_unlock_bh(&tp->lock);
1392 
1393 	return val;
1394 }
1395 
1396 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1397 {
1398 	struct tg3 *tp = bp->priv;
1399 	int ret = 0;
1400 
1401 	spin_lock_bh(&tp->lock);
1402 
1403 	if (__tg3_writephy(tp, mii_id, reg, val))
1404 		ret = -EIO;
1405 
1406 	spin_unlock_bh(&tp->lock);
1407 
1408 	return ret;
1409 }
1410 
1411 static void tg3_mdio_config_5785(struct tg3 *tp)
1412 {
1413 	u32 val;
1414 	struct phy_device *phydev;
1415 
1416 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1417 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1418 	case PHY_ID_BCM50610:
1419 	case PHY_ID_BCM50610M:
1420 		val = MAC_PHYCFG2_50610_LED_MODES;
1421 		break;
1422 	case PHY_ID_BCMAC131:
1423 		val = MAC_PHYCFG2_AC131_LED_MODES;
1424 		break;
1425 	case PHY_ID_RTL8211C:
1426 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1427 		break;
1428 	case PHY_ID_RTL8201E:
1429 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1430 		break;
1431 	default:
1432 		return;
1433 	}
1434 
1435 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1436 		tw32(MAC_PHYCFG2, val);
1437 
1438 		val = tr32(MAC_PHYCFG1);
1439 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1440 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1441 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1442 		tw32(MAC_PHYCFG1, val);
1443 
1444 		return;
1445 	}
1446 
1447 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1448 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1449 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1450 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1451 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1452 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1453 		       MAC_PHYCFG2_INBAND_ENABLE;
1454 
1455 	tw32(MAC_PHYCFG2, val);
1456 
1457 	val = tr32(MAC_PHYCFG1);
1458 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1459 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1460 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1461 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1462 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1464 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1465 	}
1466 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1467 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1468 	tw32(MAC_PHYCFG1, val);
1469 
1470 	val = tr32(MAC_EXT_RGMII_MODE);
1471 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1472 		 MAC_RGMII_MODE_RX_QUALITY |
1473 		 MAC_RGMII_MODE_RX_ACTIVITY |
1474 		 MAC_RGMII_MODE_RX_ENG_DET |
1475 		 MAC_RGMII_MODE_TX_ENABLE |
1476 		 MAC_RGMII_MODE_TX_LOWPWR |
1477 		 MAC_RGMII_MODE_TX_RESET);
1478 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1479 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1480 			val |= MAC_RGMII_MODE_RX_INT_B |
1481 			       MAC_RGMII_MODE_RX_QUALITY |
1482 			       MAC_RGMII_MODE_RX_ACTIVITY |
1483 			       MAC_RGMII_MODE_RX_ENG_DET;
1484 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1485 			val |= MAC_RGMII_MODE_TX_ENABLE |
1486 			       MAC_RGMII_MODE_TX_LOWPWR |
1487 			       MAC_RGMII_MODE_TX_RESET;
1488 	}
1489 	tw32(MAC_EXT_RGMII_MODE, val);
1490 }
1491 
1492 static void tg3_mdio_start(struct tg3 *tp)
1493 {
1494 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1495 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1496 	udelay(80);
1497 
1498 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1499 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1500 		tg3_mdio_config_5785(tp);
1501 }
1502 
1503 static int tg3_mdio_init(struct tg3 *tp)
1504 {
1505 	int i;
1506 	u32 reg;
1507 	struct phy_device *phydev;
1508 
1509 	if (tg3_flag(tp, 5717_PLUS)) {
1510 		u32 is_serdes;
1511 
1512 		tp->phy_addr = tp->pci_fn + 1;
1513 
1514 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1515 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1516 		else
1517 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1518 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1519 		if (is_serdes)
1520 			tp->phy_addr += 7;
1521 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1522 		int addr;
1523 
1524 		addr = ssb_gige_get_phyaddr(tp->pdev);
1525 		if (addr < 0)
1526 			return addr;
1527 		tp->phy_addr = addr;
1528 	} else
1529 		tp->phy_addr = TG3_PHY_MII_ADDR;
1530 
1531 	tg3_mdio_start(tp);
1532 
1533 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1534 		return 0;
1535 
1536 	tp->mdio_bus = mdiobus_alloc();
1537 	if (tp->mdio_bus == NULL)
1538 		return -ENOMEM;
1539 
1540 	tp->mdio_bus->name     = "tg3 mdio bus";
1541 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1542 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1543 	tp->mdio_bus->priv     = tp;
1544 	tp->mdio_bus->parent   = &tp->pdev->dev;
1545 	tp->mdio_bus->read     = &tg3_mdio_read;
1546 	tp->mdio_bus->write    = &tg3_mdio_write;
1547 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548 
1549 	/* The bus registration will look for all the PHYs on the mdio bus.
1550 	 * Unfortunately, it does not ensure the PHY is powered up before
1551 	 * accessing the PHY ID registers.  A chip reset is the
1552 	 * quickest way to bring the device back to an operational state.
1553 	 */
1554 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 		tg3_bmcr_reset(tp);
1556 
1557 	i = mdiobus_register(tp->mdio_bus);
1558 	if (i) {
1559 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 		mdiobus_free(tp->mdio_bus);
1561 		return i;
1562 	}
1563 
1564 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565 
1566 	if (!phydev || !phydev->drv) {
1567 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 		mdiobus_unregister(tp->mdio_bus);
1569 		mdiobus_free(tp->mdio_bus);
1570 		return -ENODEV;
1571 	}
1572 
1573 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 	case PHY_ID_BCM57780:
1575 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 		break;
1578 	case PHY_ID_BCM50610:
1579 	case PHY_ID_BCM50610M:
1580 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 				     PHY_BRCM_RX_REFCLK_UNUSED |
1582 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1585 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1586 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1587 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1588 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1589 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1590 		fallthrough;
1591 	case PHY_ID_RTL8211C:
1592 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1593 		break;
1594 	case PHY_ID_RTL8201E:
1595 	case PHY_ID_BCMAC131:
1596 		phydev->interface = PHY_INTERFACE_MODE_MII;
1597 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1598 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1599 		break;
1600 	}
1601 
1602 	tg3_flag_set(tp, MDIOBUS_INITED);
1603 
1604 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1605 		tg3_mdio_config_5785(tp);
1606 
1607 	return 0;
1608 }
1609 
1610 static void tg3_mdio_fini(struct tg3 *tp)
1611 {
1612 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1613 		tg3_flag_clear(tp, MDIOBUS_INITED);
1614 		mdiobus_unregister(tp->mdio_bus);
1615 		mdiobus_free(tp->mdio_bus);
1616 	}
1617 }
1618 
1619 /* tp->lock is held. */
1620 static inline void tg3_generate_fw_event(struct tg3 *tp)
1621 {
1622 	u32 val;
1623 
1624 	val = tr32(GRC_RX_CPU_EVENT);
1625 	val |= GRC_RX_CPU_DRIVER_EVENT;
1626 	tw32_f(GRC_RX_CPU_EVENT, val);
1627 
1628 	tp->last_event_jiffies = jiffies;
1629 }
1630 
1631 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1632 
1633 /* tp->lock is held. */
1634 static void tg3_wait_for_event_ack(struct tg3 *tp)
1635 {
1636 	int i;
1637 	unsigned int delay_cnt;
1638 	long time_remain;
1639 
1640 	/* If enough time has passed, no wait is necessary. */
1641 	time_remain = (long)(tp->last_event_jiffies + 1 +
1642 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1643 		      (long)jiffies;
1644 	if (time_remain < 0)
1645 		return;
1646 
1647 	/* Check if we can shorten the wait time. */
1648 	delay_cnt = jiffies_to_usecs(time_remain);
1649 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1650 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1651 	delay_cnt = (delay_cnt >> 3) + 1;
1652 
1653 	for (i = 0; i < delay_cnt; i++) {
1654 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1655 			break;
1656 		if (pci_channel_offline(tp->pdev))
1657 			break;
1658 
1659 		udelay(8);
1660 	}
1661 }
1662 
1663 /* tp->lock is held. */
1664 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1665 {
1666 	u32 reg, val;
1667 
1668 	val = 0;
1669 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1670 		val = reg << 16;
1671 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1672 		val |= (reg & 0xffff);
1673 	*data++ = val;
1674 
1675 	val = 0;
1676 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1677 		val = reg << 16;
1678 	if (!tg3_readphy(tp, MII_LPA, &reg))
1679 		val |= (reg & 0xffff);
1680 	*data++ = val;
1681 
1682 	val = 0;
1683 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1684 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1685 			val = reg << 16;
1686 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1687 			val |= (reg & 0xffff);
1688 	}
1689 	*data++ = val;
1690 
1691 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1692 		val = reg << 16;
1693 	else
1694 		val = 0;
1695 	*data++ = val;
1696 }
1697 
1698 /* tp->lock is held. */
1699 static void tg3_ump_link_report(struct tg3 *tp)
1700 {
1701 	u32 data[4];
1702 
1703 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1704 		return;
1705 
1706 	tg3_phy_gather_ump_data(tp, data);
1707 
1708 	tg3_wait_for_event_ack(tp);
1709 
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1711 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1712 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1713 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1714 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1715 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1716 
1717 	tg3_generate_fw_event(tp);
1718 }
1719 
1720 /* tp->lock is held. */
1721 static void tg3_stop_fw(struct tg3 *tp)
1722 {
1723 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1724 		/* Wait for RX cpu to ACK the previous event. */
1725 		tg3_wait_for_event_ack(tp);
1726 
1727 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1728 
1729 		tg3_generate_fw_event(tp);
1730 
1731 		/* Wait for RX cpu to ACK this event. */
1732 		tg3_wait_for_event_ack(tp);
1733 	}
1734 }
1735 
1736 /* tp->lock is held. */
1737 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1738 {
1739 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1740 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1741 
1742 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1743 		switch (kind) {
1744 		case RESET_KIND_INIT:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_START);
1747 			break;
1748 
1749 		case RESET_KIND_SHUTDOWN:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_UNLOAD);
1752 			break;
1753 
1754 		case RESET_KIND_SUSPEND:
1755 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756 				      DRV_STATE_SUSPEND);
1757 			break;
1758 
1759 		default:
1760 			break;
1761 		}
1762 	}
1763 }
1764 
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1767 {
1768 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1769 		switch (kind) {
1770 		case RESET_KIND_INIT:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_START_DONE);
1773 			break;
1774 
1775 		case RESET_KIND_SHUTDOWN:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_UNLOAD_DONE);
1778 			break;
1779 
1780 		default:
1781 			break;
1782 		}
1783 	}
1784 }
1785 
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1788 {
1789 	if (tg3_flag(tp, ENABLE_ASF)) {
1790 		switch (kind) {
1791 		case RESET_KIND_INIT:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_START);
1794 			break;
1795 
1796 		case RESET_KIND_SHUTDOWN:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_UNLOAD);
1799 			break;
1800 
1801 		case RESET_KIND_SUSPEND:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_SUSPEND);
1804 			break;
1805 
1806 		default:
1807 			break;
1808 		}
1809 	}
1810 }
1811 
1812 static int tg3_poll_fw(struct tg3 *tp)
1813 {
1814 	int i;
1815 	u32 val;
1816 
1817 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1818 		return 0;
1819 
1820 	if (tg3_flag(tp, IS_SSB_CORE)) {
1821 		/* We don't use firmware. */
1822 		return 0;
1823 	}
1824 
1825 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1826 		/* Wait up to 20ms for init done. */
1827 		for (i = 0; i < 200; i++) {
1828 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1829 				return 0;
1830 			if (pci_channel_offline(tp->pdev))
1831 				return -ENODEV;
1832 
1833 			udelay(100);
1834 		}
1835 		return -ENODEV;
1836 	}
1837 
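	/* The firmware signals completion by writing back the one's
	 * complement of the MAGIC1 value that tg3_write_sig_pre_reset()
	 * stored in the same mailbox.
	 */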
1838 	/* Wait for firmware initialization to complete. */
1839 	for (i = 0; i < 100000; i++) {
1840 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1841 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1842 			break;
1843 		if (pci_channel_offline(tp->pdev)) {
1844 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1845 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1846 				netdev_info(tp->dev, "No firmware running\n");
1847 			}
1848 
1849 			break;
1850 		}
1851 
1852 		udelay(10);
1853 	}
1854 
1855 	/* Chip might not be fitted with firmware.  Some Sun onboard
1856 	 * parts are configured like that.  So don't signal the timeout
1857 	 * of the above loop as an error, but do report the lack of
1858 	 * running firmware once.
1859 	 */
1860 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1861 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1862 
1863 		netdev_info(tp->dev, "No firmware running\n");
1864 	}
1865 
1866 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1867 		/* The 57765 A0 needs a little more
1868 		 * time to do some important work.
1869 		 */
1870 		mdelay(10);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_link_report(struct tg3 *tp)
1877 {
1878 	if (!netif_carrier_ok(tp->dev)) {
1879 		netif_info(tp, link, tp->dev, "Link is down\n");
1880 		tg3_ump_link_report(tp);
1881 	} else if (netif_msg_link(tp)) {
1882 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1883 			    (tp->link_config.active_speed == SPEED_1000 ?
1884 			     1000 :
1885 			     (tp->link_config.active_speed == SPEED_100 ?
1886 			      100 : 10)),
1887 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1888 			     "full" : "half"));
1889 
1890 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1891 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1892 			    "on" : "off",
1893 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1894 			    "on" : "off");
1895 
1896 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1897 			netdev_info(tp->dev, "EEE is %s\n",
1898 				    tp->setlpicnt ? "enabled" : "disabled");
1899 
1900 		tg3_ump_link_report(tp);
1901 	}
1902 
1903 	tp->link_up = netif_carrier_ok(tp->dev);
1904 }
1905 
1906 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1907 {
1908 	u32 flowctrl = 0;
1909 
1910 	if (adv & ADVERTISE_PAUSE_CAP) {
1911 		flowctrl |= FLOW_CTRL_RX;
1912 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1913 			flowctrl |= FLOW_CTRL_TX;
1914 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1915 		flowctrl |= FLOW_CTRL_TX;
1916 
1917 	return flowctrl;
1918 }
1919 
1920 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1921 {
1922 	u16 miireg;
1923 
1924 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1925 		miireg = ADVERTISE_1000XPAUSE;
1926 	else if (flow_ctrl & FLOW_CTRL_TX)
1927 		miireg = ADVERTISE_1000XPSE_ASYM;
1928 	else if (flow_ctrl & FLOW_CTRL_RX)
1929 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1930 	else
1931 		miireg = 0;
1932 
1933 	return miireg;
1934 }
1935 
1936 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1937 {
1938 	u32 flowctrl = 0;
1939 
1940 	if (adv & ADVERTISE_1000XPAUSE) {
1941 		flowctrl |= FLOW_CTRL_RX;
1942 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1943 			flowctrl |= FLOW_CTRL_TX;
1944 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1945 		flowctrl |= FLOW_CTRL_TX;
1946 
1947 	return flowctrl;
1948 }
1949 
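/* Resolve TX/RX pause from the local and link-partner 1000BASE-X
 * advertisements following the IEEE 802.3 pause resolution rules:
 * symmetric pause when both ends advertise PAUSE, otherwise an
 * asymmetric direction when the ASYM bits permit it.
 */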
1950 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1951 {
1952 	u8 cap = 0;
1953 
1954 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1955 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1956 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1957 		if (lcladv & ADVERTISE_1000XPAUSE)
1958 			cap = FLOW_CTRL_RX;
1959 		if (rmtadv & ADVERTISE_1000XPAUSE)
1960 			cap = FLOW_CTRL_TX;
1961 	}
1962 
1963 	return cap;
1964 }
1965 
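/* Apply the negotiated (or forced) pause configuration: resolve the
 * TX/RX pause bits from the advertisements when pause autoneg is on,
 * then rewrite MAC_RX_MODE/MAC_TX_MODE only if the flow control
 * enable bits actually changed.
 */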
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1967 {
1968 	u8 autoneg;
1969 	u8 flowctrl = 0;
1970 	u32 old_rx_mode = tp->rx_mode;
1971 	u32 old_tx_mode = tp->tx_mode;
1972 
1973 	if (tg3_flag(tp, USE_PHYLIB))
1974 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1975 	else
1976 		autoneg = tp->link_config.autoneg;
1977 
1978 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981 		else
1982 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983 	} else
1984 		flowctrl = tp->link_config.flowctrl;
1985 
1986 	tp->link_config.active_flowctrl = flowctrl;
1987 
1988 	if (flowctrl & FLOW_CTRL_RX)
1989 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990 	else
1991 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1992 
1993 	if (old_rx_mode != tp->rx_mode)
1994 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1995 
1996 	if (flowctrl & FLOW_CTRL_TX)
1997 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998 	else
1999 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2000 
2001 	if (old_tx_mode != tp->tx_mode)
2002 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2003 }
2004 
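/* phylib link-change callback: mirror the phydev state (port mode,
 * duplex, pause, IPG timings) into the MAC under tp->lock and emit a
 * link report only when something user-visible changed.
 */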
2005 static void tg3_adjust_link(struct net_device *dev)
2006 {
2007 	u8 oldflowctrl, linkmesg = 0;
2008 	u32 mac_mode, lcl_adv, rmt_adv;
2009 	struct tg3 *tp = netdev_priv(dev);
2010 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2011 
2012 	spin_lock_bh(&tp->lock);
2013 
2014 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015 				    MAC_MODE_HALF_DUPLEX);
2016 
2017 	oldflowctrl = tp->link_config.active_flowctrl;
2018 
2019 	if (phydev->link) {
2020 		lcl_adv = 0;
2021 		rmt_adv = 0;
2022 
2023 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 		else if (phydev->speed == SPEED_1000 ||
2026 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2027 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 		else
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 
2031 		if (phydev->duplex == DUPLEX_HALF)
2032 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2033 		else {
2034 			lcl_adv = mii_advertise_flowctrl(
2035 				  tp->link_config.flowctrl);
2036 
2037 			if (phydev->pause)
2038 				rmt_adv = LPA_PAUSE_CAP;
2039 			if (phydev->asym_pause)
2040 				rmt_adv |= LPA_PAUSE_ASYM;
2041 		}
2042 
2043 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044 	} else
2045 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2046 
2047 	if (mac_mode != tp->mac_mode) {
2048 		tp->mac_mode = mac_mode;
2049 		tw32_f(MAC_MODE, tp->mac_mode);
2050 		udelay(40);
2051 	}
2052 
2053 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054 		if (phydev->speed == SPEED_10)
2055 			tw32(MAC_MI_STAT,
2056 			     MAC_MI_STAT_10MBPS_MODE |
2057 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058 		else
2059 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060 	}
2061 
2062 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 	else
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 
2073 	if (phydev->link != tp->old_link ||
2074 	    phydev->speed != tp->link_config.active_speed ||
2075 	    phydev->duplex != tp->link_config.active_duplex ||
2076 	    oldflowctrl != tp->link_config.active_flowctrl)
2077 		linkmesg = 1;
2078 
2079 	tp->old_link = phydev->link;
2080 	tp->link_config.active_speed = phydev->speed;
2081 	tp->link_config.active_duplex = phydev->duplex;
2082 
2083 	spin_unlock_bh(&tp->lock);
2084 
2085 	if (linkmesg)
2086 		tg3_link_report(tp);
2087 }
2088 
2089 static int tg3_phy_init(struct tg3 *tp)
2090 {
2091 	struct phy_device *phydev;
2092 
2093 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094 		return 0;
2095 
2096 	/* Bring the PHY back to a known state. */
2097 	tg3_bmcr_reset(tp);
2098 
2099 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2100 
2101 	/* Attach the MAC to the PHY. */
2102 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2103 			     tg3_adjust_link, phydev->interface);
2104 	if (IS_ERR(phydev)) {
2105 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106 		return PTR_ERR(phydev);
2107 	}
2108 
2109 	/* Mask with MAC supported features. */
2110 	switch (phydev->interface) {
2111 	case PHY_INTERFACE_MODE_GMII:
2112 	case PHY_INTERFACE_MODE_RGMII:
2113 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114 			phy_set_max_speed(phydev, SPEED_1000);
2115 			phy_support_asym_pause(phydev);
2116 			break;
2117 		}
2118 		fallthrough;
2119 	case PHY_INTERFACE_MODE_MII:
2120 		phy_set_max_speed(phydev, SPEED_100);
2121 		phy_support_asym_pause(phydev);
2122 		break;
2123 	default:
2124 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2125 		return -EINVAL;
2126 	}
2127 
2128 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2129 
2130 	phy_attached_info(phydev);
2131 
2132 	return 0;
2133 }
2134 
2135 static void tg3_phy_start(struct tg3 *tp)
2136 {
2137 	struct phy_device *phydev;
2138 
2139 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2140 		return;
2141 
2142 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2143 
2144 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2145 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2146 		phydev->speed = tp->link_config.speed;
2147 		phydev->duplex = tp->link_config.duplex;
2148 		phydev->autoneg = tp->link_config.autoneg;
2149 		ethtool_convert_legacy_u32_to_link_mode(
2150 			phydev->advertising, tp->link_config.advertising);
2151 	}
2152 
2153 	phy_start(phydev);
2154 
2155 	phy_start_aneg(phydev);
2156 }
2157 
2158 static void tg3_phy_stop(struct tg3 *tp)
2159 {
2160 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2161 		return;
2162 
2163 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 }
2165 
2166 static void tg3_phy_fini(struct tg3 *tp)
2167 {
2168 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2169 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2170 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2171 	}
2172 }
2173 
2174 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2175 {
2176 	int err;
2177 	u32 val;
2178 
2179 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2180 		return 0;
2181 
2182 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2183 		/* Cannot do read-modify-write on 5401 */
2184 		err = tg3_phy_auxctl_write(tp,
2185 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2186 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2187 					   0x4c20);
2188 		goto done;
2189 	}
2190 
2191 	err = tg3_phy_auxctl_read(tp,
2192 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2193 	if (err)
2194 		return err;
2195 
2196 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2197 	err = tg3_phy_auxctl_write(tp,
2198 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2199 
2200 done:
2201 	return err;
2202 }
2203 
2204 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2205 {
2206 	u32 phytest;
2207 
2208 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2209 		u32 phy;
2210 
2211 		tg3_writephy(tp, MII_TG3_FET_TEST,
2212 			     phytest | MII_TG3_FET_SHADOW_EN);
2213 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2214 			if (enable)
2215 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2216 			else
2217 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2218 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2219 		}
2220 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2221 	}
2222 }
2223 
2224 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2225 {
2226 	u32 reg;
2227 
2228 	if (!tg3_flag(tp, 5705_PLUS) ||
2229 	    (tg3_flag(tp, 5717_PLUS) &&
2230 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2231 		return;
2232 
2233 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2234 		tg3_phy_fet_toggle_apd(tp, enable);
2235 		return;
2236 	}
2237 
2238 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2239 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2240 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2241 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2242 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2243 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2244 
2245 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2246 
2247 
2248 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2249 	if (enable)
2250 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2251 
2252 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2253 }
2254 
2255 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2256 {
2257 	u32 phy;
2258 
2259 	if (!tg3_flag(tp, 5705_PLUS) ||
2260 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2261 		return;
2262 
2263 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2264 		u32 ephy;
2265 
2266 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2267 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2268 
2269 			tg3_writephy(tp, MII_TG3_FET_TEST,
2270 				     ephy | MII_TG3_FET_SHADOW_EN);
2271 			if (!tg3_readphy(tp, reg, &phy)) {
2272 				if (enable)
2273 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2274 				else
2275 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2276 				tg3_writephy(tp, reg, phy);
2277 			}
2278 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2279 		}
2280 	} else {
2281 		int ret;
2282 
2283 		ret = tg3_phy_auxctl_read(tp,
2284 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2285 		if (!ret) {
2286 			if (enable)
2287 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2288 			else
2289 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2290 			tg3_phy_auxctl_write(tp,
2291 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2292 		}
2293 	}
2294 }
2295 
2296 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2297 {
2298 	int ret;
2299 	u32 val;
2300 
2301 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2302 		return;
2303 
2304 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2305 	if (!ret)
2306 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2307 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2308 }
2309 
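/* The PHY's one-time-programmable fuse word carries per-part analog
 * trim values; unpack each bitfield and program it into the matching
 * DSP coefficient register while SMDSP access is enabled.
 */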
2310 static void tg3_phy_apply_otp(struct tg3 *tp)
2311 {
2312 	u32 otp, phy;
2313 
2314 	if (!tp->phy_otp)
2315 		return;
2316 
2317 	otp = tp->phy_otp;
2318 
2319 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2320 		return;
2321 
2322 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2323 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2324 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2325 
2326 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2327 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2328 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2329 
2330 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2331 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2332 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2333 
2334 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2335 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2336 
2337 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2338 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2339 
2340 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2341 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2342 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2343 
2344 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2345 }
2346 
2347 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2348 {
2349 	u32 val;
2350 	struct ethtool_eee *dest = &tp->eee;
2351 
2352 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2353 		return;
2354 
2355 	if (eee)
2356 		dest = eee;
2357 
2358 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2359 		return;
2360 
2361 	/* Pull eee_active */
2362 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2363 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2364 		dest->eee_active = 1;
2365 	} else
2366 		dest->eee_active = 0;
2367 
2368 	/* Pull lp advertised settings */
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2370 		return;
2371 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372 
2373 	/* Pull advertised and eee_enabled settings */
2374 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2375 		return;
2376 	dest->eee_enabled = !!val;
2377 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2378 
2379 	/* Pull tx_lpi_enabled */
2380 	val = tr32(TG3_CPMU_EEE_MODE);
2381 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2382 
2383 	/* Pull lpi timer value */
2384 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2385 }
2386 
2387 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2388 {
2389 	u32 val;
2390 
2391 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2392 		return;
2393 
2394 	tp->setlpicnt = 0;
2395 
2396 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2397 	    current_link_up &&
2398 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2399 	    (tp->link_config.active_speed == SPEED_100 ||
2400 	     tp->link_config.active_speed == SPEED_1000)) {
2401 		u32 eeectl;
2402 
2403 		if (tp->link_config.active_speed == SPEED_1000)
2404 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2405 		else
2406 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2407 
2408 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2409 
2410 		tg3_eee_pull_config(tp, NULL);
2411 		if (tp->eee.eee_active)
2412 			tp->setlpicnt = 2;
2413 	}
2414 
2415 	if (!tp->setlpicnt) {
2416 		if (current_link_up &&
2417 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2418 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2419 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2420 		}
2421 
2422 		val = tr32(TG3_CPMU_EEE_MODE);
2423 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2424 	}
2425 }
2426 
2427 static void tg3_phy_eee_enable(struct tg3 *tp)
2428 {
2429 	u32 val;
2430 
2431 	if (tp->link_config.active_speed == SPEED_1000 &&
2432 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2433 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2434 	     tg3_flag(tp, 57765_CLASS)) &&
2435 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2436 		val = MII_TG3_DSP_TAP26_ALNOKO |
2437 		      MII_TG3_DSP_TAP26_RMRXSTO;
2438 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2439 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2440 	}
2441 
2442 	val = tr32(TG3_CPMU_EEE_MODE);
2443 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2444 }
2445 
2446 static int tg3_wait_macro_done(struct tg3 *tp)
2447 {
2448 	int limit = 100;
2449 
2450 	while (limit--) {
2451 		u32 tmp32;
2452 
2453 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2454 			if ((tmp32 & 0x1000) == 0)
2455 				break;
2456 		}
2457 	}
2458 	if (limit < 0)
2459 		return -EBUSY;
2460 
2461 	return 0;
2462 }
2463 
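/* Write a known test pattern into the DSP TAP memory of each of the
 * four channels and read it back; any mismatch requests another PHY
 * reset pass through *resetp.
 */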
2464 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2465 {
2466 	static const u32 test_pat[4][6] = {
2467 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2468 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2469 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2470 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2471 	};
2472 	int chan;
2473 
2474 	for (chan = 0; chan < 4; chan++) {
2475 		int i;
2476 
2477 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2478 			     (chan * 0x2000) | 0x0200);
2479 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2480 
2481 		for (i = 0; i < 6; i++)
2482 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2483 				     test_pat[chan][i]);
2484 
2485 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2486 		if (tg3_wait_macro_done(tp)) {
2487 			*resetp = 1;
2488 			return -EBUSY;
2489 		}
2490 
2491 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2492 			     (chan * 0x2000) | 0x0200);
2493 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2494 		if (tg3_wait_macro_done(tp)) {
2495 			*resetp = 1;
2496 			return -EBUSY;
2497 		}
2498 
2499 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2500 		if (tg3_wait_macro_done(tp)) {
2501 			*resetp = 1;
2502 			return -EBUSY;
2503 		}
2504 
2505 		for (i = 0; i < 6; i += 2) {
2506 			u32 low, high;
2507 
2508 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2509 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2510 			    tg3_wait_macro_done(tp)) {
2511 				*resetp = 1;
2512 				return -EBUSY;
2513 			}
2514 			low &= 0x7fff;
2515 			high &= 0x000f;
2516 			if (low != test_pat[chan][i] ||
2517 			    high != test_pat[chan][i+1]) {
2518 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2519 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2520 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2521 
2522 				return -EBUSY;
2523 			}
2524 		}
2525 	}
2526 
2527 	return 0;
2528 }
2529 
2530 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2531 {
2532 	int chan;
2533 
2534 	for (chan = 0; chan < 4; chan++) {
2535 		int i;
2536 
2537 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2538 			     (chan * 0x2000) | 0x0200);
2539 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2540 		for (i = 0; i < 6; i++)
2541 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2542 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2543 		if (tg3_wait_macro_done(tp))
2544 			return -EBUSY;
2545 	}
2546 
2547 	return 0;
2548 }
2549 
2550 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2551 {
2552 	u32 reg32, phy9_orig;
2553 	int retries, do_phy_reset, err;
2554 
2555 	retries = 10;
2556 	do_phy_reset = 1;
2557 	do {
2558 		if (do_phy_reset) {
2559 			err = tg3_bmcr_reset(tp);
2560 			if (err)
2561 				return err;
2562 			do_phy_reset = 0;
2563 		}
2564 
2565 		/* Disable transmitter and interrupt.  */
2566 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2567 			continue;
2568 
2569 		reg32 |= 0x3000;
2570 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571 
2572 		/* Set full-duplex, 1000 mbps.  */
2573 		tg3_writephy(tp, MII_BMCR,
2574 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2575 
2576 		/* Set to master mode.  */
2577 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2578 			continue;
2579 
2580 		tg3_writephy(tp, MII_CTRL1000,
2581 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2582 
2583 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2584 		if (err)
2585 			return err;
2586 
2587 		/* Block the PHY control access.  */
2588 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2589 
2590 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2591 		if (!err)
2592 			break;
2593 	} while (--retries);
2594 
2595 	err = tg3_phy_reset_chanpat(tp);
2596 	if (err)
2597 		return err;
2598 
2599 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2600 
2601 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2602 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2603 
2604 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2605 
2606 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2607 
2608 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2609 	if (err)
2610 		return err;
2611 
2612 	reg32 &= ~0x3000;
2613 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2614 
2615 	return 0;
2616 }
2617 
2618 static void tg3_carrier_off(struct tg3 *tp)
2619 {
2620 	netif_carrier_off(tp->dev);
2621 	tp->link_up = false;
2622 }
2623 
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2625 {
2626 	if (tg3_flag(tp, ENABLE_ASF))
2627 		netdev_warn(tp->dev,
2628 			    "Management side-band traffic will be interrupted during phy settings change\n");
2629 }
2630 
2631 /* Reset the tigon3 PHY and reapply the chip-specific PHY
2632  * workarounds (DSP fixups, APD, auto-MDIX, wirespeed).
2633  */
2634 static int tg3_phy_reset(struct tg3 *tp)
2635 {
2636 	u32 val, cpmuctrl;
2637 	int err;
2638 
2639 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 		val = tr32(GRC_MISC_CFG);
2641 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642 		udelay(40);
2643 	}
2644 	err  = tg3_readphy(tp, MII_BMSR, &val);
2645 	err |= tg3_readphy(tp, MII_BMSR, &val);
2646 	if (err != 0)
2647 		return -EBUSY;
2648 
2649 	if (netif_running(tp->dev) && tp->link_up) {
2650 		netif_carrier_off(tp->dev);
2651 		tg3_link_report(tp);
2652 	}
2653 
2654 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 		err = tg3_phy_reset_5703_4_5(tp);
2658 		if (err)
2659 			return err;
2660 		goto out;
2661 	}
2662 
2663 	cpmuctrl = 0;
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668 			tw32(TG3_CPMU_CTRL,
2669 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2670 	}
2671 
2672 	err = tg3_bmcr_reset(tp);
2673 	if (err)
2674 		return err;
2675 
2676 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2679 
2680 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2681 	}
2682 
2683 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689 			udelay(40);
2690 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2691 		}
2692 	}
2693 
2694 	if (tg3_flag(tp, 5717_PLUS) &&
2695 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696 		return 0;
2697 
2698 	tg3_phy_apply_otp(tp);
2699 
2700 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 		tg3_phy_toggle_apd(tp, true);
2702 	else
2703 		tg3_phy_toggle_apd(tp, false);
2704 
2705 out:
2706 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716 	}
2717 
2718 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2724 		}
2725 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 				tg3_writephy(tp, MII_TG3_TEST1,
2731 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2732 			} else
2733 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2734 
2735 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2736 		}
2737 	}
2738 
2739 	/* Set the Extended packet length bit (bit 14) on all chips that
2740 	 * support jumbo frames. */
2741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 		/* Cannot do read-modify-write on 5401 */
2743 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 		/* Set bit 14 with read-modify-write to preserve other bits */
2746 		err = tg3_phy_auxctl_read(tp,
2747 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748 		if (!err)
2749 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2751 	}
2752 
2753 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 	 * jumbo frame transmission.
2755 	 */
2756 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2760 	}
2761 
2762 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 		/* adjust output voltage */
2764 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2765 	}
2766 
2767 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2769 
2770 	tg3_phy_toggle_automdix(tp, true);
2771 	tg3_phy_set_wirespeed(tp);
2772 	return 0;
2773 }
2774 
2775 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2777 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2778 					  TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2784 
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2790 
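/* Each PCI function owns a 4-bit slot in the shared GPIO status word
 * (hence the ALL_*_MASK macros above OR'ing shifts 0, 4, 8 and 12).
 * Update only this function's slot and return the whole bitmap so the
 * caller can see what the other ports still require.
 */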
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2792 {
2793 	u32 status, shift;
2794 
2795 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2797 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798 	else
2799 		status = tr32(TG3_CPMU_DRV_STATUS);
2800 
2801 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 	status |= (newstat << shift);
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2807 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808 	else
2809 		tw32(TG3_CPMU_DRV_STATUS, status);
2810 
2811 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2812 }
2813 
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2815 {
2816 	if (!tg3_flag(tp, IS_NIC))
2817 		return 0;
2818 
2819 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823 			return -EIO;
2824 
2825 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2826 
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 
2830 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 	} else {
2832 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2840 {
2841 	u32 grc_local_ctrl;
2842 
2843 	if (!tg3_flag(tp, IS_NIC) ||
2844 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2846 		return;
2847 
2848 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2849 
2850 	tw32_wait_f(GRC_LOCAL_CTRL,
2851 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 
2854 	tw32_wait_f(GRC_LOCAL_CTRL,
2855 		    grc_local_ctrl,
2856 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 
2858 	tw32_wait_f(GRC_LOCAL_CTRL,
2859 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 }
2862 
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2864 {
2865 	if (!tg3_flag(tp, IS_NIC))
2866 		return;
2867 
2868 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 			    (GRC_LCLCTRL_GPIO_OE0 |
2872 			     GRC_LCLCTRL_GPIO_OE1 |
2873 			     GRC_LCLCTRL_GPIO_OE2 |
2874 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2876 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 				     GRC_LCLCTRL_GPIO_OE1 |
2882 				     GRC_LCLCTRL_GPIO_OE2 |
2883 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 				     tp->grc_local_ctrl;
2886 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 
2889 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 
2893 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 	} else {
2897 		u32 no_gpio2;
2898 		u32 grc_local_ctrl = 0;
2899 
2900 		/* Workaround to prevent overdrawing Amps. */
2901 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904 				    grc_local_ctrl,
2905 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 		}
2907 
2908 		/* On 5753 and variants, GPIO2 cannot be used. */
2909 		no_gpio2 = tp->nic_sram_data_cfg &
2910 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2911 
2912 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 				  GRC_LCLCTRL_GPIO_OE1 |
2914 				  GRC_LCLCTRL_GPIO_OE2 |
2915 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2917 		if (no_gpio2) {
2918 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2920 		}
2921 		tw32_wait_f(GRC_LOCAL_CTRL,
2922 			    tp->grc_local_ctrl | grc_local_ctrl,
2923 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2924 
2925 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2926 
2927 		tw32_wait_f(GRC_LOCAL_CTRL,
2928 			    tp->grc_local_ctrl | grc_local_ctrl,
2929 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 
2931 		if (!no_gpio2) {
2932 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 			tw32_wait_f(GRC_LOCAL_CTRL,
2934 				    tp->grc_local_ctrl | grc_local_ctrl,
2935 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 		}
2937 	}
2938 }
2939 
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2941 {
2942 	u32 msg = 0;
2943 
2944 	/* Serialize power state transitions */
2945 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946 		return;
2947 
2948 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 		msg = TG3_GPIO_MSG_NEED_VAUX;
2950 
2951 	msg = tg3_set_function_status(tp, msg);
2952 
2953 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 		goto done;
2955 
2956 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 		tg3_pwrsrc_switch_to_vaux(tp);
2958 	else
2959 		tg3_pwrsrc_die_with_vmain(tp);
2960 
2961 done:
2962 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2963 }
2964 
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2966 {
2967 	bool need_vaux = false;
2968 
2969 	/* The GPIOs do something completely different on 57765. */
2970 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971 		return;
2972 
2973 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 		tg3_frob_aux_power_5717(tp, include_wol ?
2977 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978 		return;
2979 	}
2980 
2981 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 		struct net_device *dev_peer;
2983 
2984 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2985 
2986 		/* remove_one() may have been run on the peer. */
2987 		if (dev_peer) {
2988 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2989 
2990 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2991 				return;
2992 
2993 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 			    tg3_flag(tp_peer, ENABLE_ASF))
2995 				need_vaux = true;
2996 		}
2997 	}
2998 
2999 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 	    tg3_flag(tp, ENABLE_ASF))
3001 		need_vaux = true;
3002 
3003 	if (need_vaux)
3004 		tg3_pwrsrc_switch_to_vaux(tp);
3005 	else
3006 		tg3_pwrsrc_die_with_vmain(tp);
3007 }
3008 
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3010 {
3011 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 		return 1;
3013 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 		if (speed != SPEED_10)
3015 			return 1;
3016 	} else if (speed == SPEED_10)
3017 		return 1;
3018 
3019 	return 0;
3020 }
3021 
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3023 {
3024 	switch (tg3_asic_rev(tp)) {
3025 	case ASIC_REV_5700:
3026 	case ASIC_REV_5704:
3027 		return true;
3028 	case ASIC_REV_5780:
3029 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030 			return true;
3031 		return false;
3032 	case ASIC_REV_5717:
3033 		if (!tp->pci_fn)
3034 			return true;
3035 		return false;
3036 	case ASIC_REV_5719:
3037 	case ASIC_REV_5720:
3038 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039 		    !tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3048 {
3049 	switch (tg3_asic_rev(tp)) {
3050 	case ASIC_REV_5719:
3051 	case ASIC_REV_5720:
3052 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053 		    !tp->pci_fn)
3054 			return true;
3055 		return false;
3056 	}
3057 
3058 	return false;
3059 }
3060 
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3062 {
3063 	u32 val;
3064 
3065 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066 		return;
3067 
3068 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3072 
3073 			sg_dig_ctrl |=
3074 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077 		}
3078 		return;
3079 	}
3080 
3081 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082 		tg3_bmcr_reset(tp);
3083 		val = tr32(GRC_MISC_CFG);
3084 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085 		udelay(40);
3086 		return;
3087 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088 		u32 phytest;
3089 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090 			u32 phy;
3091 
3092 			tg3_writephy(tp, MII_ADVERTISE, 0);
3093 			tg3_writephy(tp, MII_BMCR,
3094 				     BMCR_ANENABLE | BMCR_ANRESTART);
3095 
3096 			tg3_writephy(tp, MII_TG3_FET_TEST,
3097 				     phytest | MII_TG3_FET_SHADOW_EN);
3098 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100 				tg3_writephy(tp,
3101 					     MII_TG3_FET_SHDW_AUXMODE4,
3102 					     phy);
3103 			}
3104 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3105 		}
3106 		return;
3107 	} else if (do_low_power) {
3108 		if (!tg3_phy_led_bug(tp))
3109 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3111 
3112 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3116 	}
3117 
3118 	/* The PHY should not be powered down on some chips because
3119 	 * of bugs.
3120 	 */
3121 	if (tg3_phy_power_bug(tp))
3122 		return;
3123 
3124 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3130 	}
3131 
3132 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3133 }
3134 
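/* NVRAM is shared with the on-chip firmware, so the driver must win
 * the hardware software-arbitration semaphore (SWARB request/grant
 * bit 1) before touching it.  The lock nests via nvram_lock_cnt.
 */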
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3137 {
3138 	if (tg3_flag(tp, NVRAM)) {
3139 		int i;
3140 
3141 		if (tp->nvram_lock_cnt == 0) {
3142 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 			for (i = 0; i < 8000; i++) {
3144 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 					break;
3146 				udelay(20);
3147 			}
3148 			if (i == 8000) {
3149 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 				return -ENODEV;
3151 			}
3152 		}
3153 		tp->nvram_lock_cnt++;
3154 	}
3155 	return 0;
3156 }
3157 
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3160 {
3161 	if (tg3_flag(tp, NVRAM)) {
3162 		if (tp->nvram_lock_cnt > 0)
3163 			tp->nvram_lock_cnt--;
3164 		if (tp->nvram_lock_cnt == 0)
3165 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3166 	}
3167 }
3168 
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3171 {
3172 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 		u32 nvaccess = tr32(NVRAM_ACCESS);
3174 
3175 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3176 	}
3177 }
3178 
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3181 {
3182 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 		u32 nvaccess = tr32(NVRAM_ACCESS);
3184 
3185 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3186 	}
3187 }
3188 
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 					u32 offset, u32 *val)
3191 {
3192 	u32 tmp;
3193 	int i;
3194 
3195 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196 		return -EINVAL;
3197 
3198 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 					EEPROM_ADDR_DEVID_MASK |
3200 					EEPROM_ADDR_READ);
3201 	tw32(GRC_EEPROM_ADDR,
3202 	     tmp |
3203 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 	      EEPROM_ADDR_ADDR_MASK) |
3206 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3207 
3208 	for (i = 0; i < 1000; i++) {
3209 		tmp = tr32(GRC_EEPROM_ADDR);
3210 
3211 		if (tmp & EEPROM_ADDR_COMPLETE)
3212 			break;
3213 		msleep(1);
3214 	}
3215 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 		return -EBUSY;
3217 
3218 	tmp = tr32(GRC_EEPROM_DATA);
3219 
3220 	/*
3221 	 * The data will always be opposite the native endian
3222 	 * format.  Perform a blind byteswap to compensate.
3223 	 */
3224 	*val = swab32(tmp);
3225 
3226 	return 0;
3227 }
3228 
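/* Commands are polled up to NVRAM_CMD_TIMEOUT times at 10-40 usecs
 * per poll, bounding a single command at roughly 0.1-0.4 seconds.
 */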
3229 #define NVRAM_CMD_TIMEOUT 10000
3230 
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 {
3233 	int i;
3234 
3235 	tw32(NVRAM_CMD, nvram_cmd);
3236 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237 		usleep_range(10, 40);
3238 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 			udelay(10);
3240 			break;
3241 		}
3242 	}
3243 
3244 	if (i == NVRAM_CMD_TIMEOUT)
3245 		return -EBUSY;
3246 
3247 	return 0;
3248 }
3249 
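/* Atmel AT45DB0x1B-style parts address their pages on power-of-two
 * (1 << ATMEL_AT45DB0X1B_PAGE_POS) boundaries even though a page
 * holds fewer bytes, so a linear offset is split into
 * page * nvram_pagesize + remainder and repacked as
 * (page << ATMEL_AT45DB0X1B_PAGE_POS) | remainder.  For example,
 * with the 264-byte pages these parts use, linear 0x10a (page 1,
 * byte 2) maps to physical 0x202.
 */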
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3251 {
3252 	if (tg3_flag(tp, NVRAM) &&
3253 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3254 	    tg3_flag(tp, FLASH) &&
3255 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3257 
3258 		addr = ((addr / tp->nvram_pagesize) <<
3259 			ATMEL_AT45DB0X1B_PAGE_POS) +
3260 		       (addr % tp->nvram_pagesize);
3261 
3262 	return addr;
3263 }
3264 
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3266 {
3267 	if (tg3_flag(tp, NVRAM) &&
3268 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3269 	    tg3_flag(tp, FLASH) &&
3270 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3272 
3273 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 			tp->nvram_pagesize) +
3275 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3276 
3277 	return addr;
3278 }
3279 
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281  * the byteswapping settings for all other register accesses.
3282  * tg3 devices are BE devices, so on a BE machine, the data
3283  * returned will be exactly as it is seen in NVRAM.  On a LE
3284  * machine, the 32-bit value will be byteswapped.
3285  */
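/* For example, the NVRAM bytes 12 34 56 78 read back as the u32
 * 0x12345678 on a big-endian host but 0x78563412 on little-endian;
 * callers that want the raw byte stream should use
 * tg3_nvram_read_be32() below.
 */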
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3287 {
3288 	int ret;
3289 
3290 	if (!tg3_flag(tp, NVRAM))
3291 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3292 
3293 	offset = tg3_nvram_phys_addr(tp, offset);
3294 
3295 	if (offset > NVRAM_ADDR_MSK)
3296 		return -EINVAL;
3297 
3298 	ret = tg3_nvram_lock(tp);
3299 	if (ret)
3300 		return ret;
3301 
3302 	tg3_enable_nvram_access(tp);
3303 
3304 	tw32(NVRAM_ADDR, offset);
3305 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3307 
3308 	if (ret == 0)
3309 		*val = tr32(NVRAM_RDDATA);
3310 
3311 	tg3_disable_nvram_access(tp);
3312 
3313 	tg3_nvram_unlock(tp);
3314 
3315 	return ret;
3316 }
3317 
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3320 {
3321 	u32 v;
3322 	int res = tg3_nvram_read(tp, offset, &v);
3323 	if (!res)
3324 		*val = cpu_to_be32(v);
3325 	return res;
3326 }
3327 
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 				    u32 offset, u32 len, u8 *buf)
3330 {
3331 	int i, j, rc = 0;
3332 	u32 val;
3333 
3334 	for (i = 0; i < len; i += 4) {
3335 		u32 addr;
3336 		__be32 data;
3337 
3338 		addr = offset + i;
3339 
3340 		memcpy(&data, buf + i, 4);
3341 
3342 		/*
3343 		 * The SEEPROM interface expects the data to always be opposite
3344 		 * the native endian format.  We accomplish this by reversing
3345 		 * all the operations that would have been performed on the
3346 		 * data from a call to tg3_nvram_read_be32().
3347 		 */
3348 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3349 
3350 		val = tr32(GRC_EEPROM_ADDR);
3351 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3352 
3353 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354 			EEPROM_ADDR_READ);
3355 		tw32(GRC_EEPROM_ADDR, val |
3356 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 			(addr & EEPROM_ADDR_ADDR_MASK) |
3358 			EEPROM_ADDR_START |
3359 			EEPROM_ADDR_WRITE);
3360 
3361 		for (j = 0; j < 1000; j++) {
3362 			val = tr32(GRC_EEPROM_ADDR);
3363 
3364 			if (val & EEPROM_ADDR_COMPLETE)
3365 				break;
3366 			msleep(1);
3367 		}
3368 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3369 			rc = -EBUSY;
3370 			break;
3371 		}
3372 	}
3373 
3374 	return rc;
3375 }
3376 
3377 /* offset and length are dword aligned */
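/* Unbuffered flash has no on-chip page buffer, so a partial page
 * update is read-modify-write: read the whole page into RAM, merge
 * the new bytes, issue write-enable, erase the page, then stream the
 * merged page back out word by word.
 */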
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379 		u8 *buf)
3380 {
3381 	int ret = 0;
3382 	u32 pagesize = tp->nvram_pagesize;
3383 	u32 pagemask = pagesize - 1;
3384 	u32 nvram_cmd;
3385 	u8 *tmp;
3386 
3387 	tmp = kmalloc(pagesize, GFP_KERNEL);
3388 	if (tmp == NULL)
3389 		return -ENOMEM;
3390 
3391 	while (len) {
3392 		int j;
3393 		u32 phy_addr, page_off, size;
3394 
3395 		phy_addr = offset & ~pagemask;
3396 
3397 		for (j = 0; j < pagesize; j += 4) {
3398 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 						  (__be32 *) (tmp + j));
3400 			if (ret)
3401 				break;
3402 		}
3403 		if (ret)
3404 			break;
3405 
3406 		page_off = offset & pagemask;
3407 		size = pagesize;
3408 		if (len < size)
3409 			size = len;
3410 
3411 		len -= size;
3412 
3413 		memcpy(tmp + page_off, buf, size);
3414 
3415 		offset = offset + (pagesize - page_off);
3416 
3417 		tg3_enable_nvram_access(tp);
3418 
3419 		/*
3420 		 * Before we can erase the flash page, we need
3421 		 * to issue a special "write enable" command.
3422 		 */
3423 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424 
3425 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 			break;
3427 
3428 		/* Erase the target page */
3429 		tw32(NVRAM_ADDR, phy_addr);
3430 
3431 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		/* Issue another write enable to start the write. */
3438 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3439 
3440 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 			break;
3442 
3443 		for (j = 0; j < pagesize; j += 4) {
3444 			__be32 data;
3445 
3446 			data = *((__be32 *) (tmp + j));
3447 
3448 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3449 
3450 			tw32(NVRAM_ADDR, phy_addr + j);
3451 
3452 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453 				NVRAM_CMD_WR;
3454 
3455 			if (j == 0)
3456 				nvram_cmd |= NVRAM_CMD_FIRST;
3457 			else if (j == (pagesize - 4))
3458 				nvram_cmd |= NVRAM_CMD_LAST;
3459 
3460 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 			if (ret)
3462 				break;
3463 		}
3464 		if (ret)
3465 			break;
3466 	}
3467 
3468 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 
3471 	kfree(tmp);
3472 
3473 	return ret;
3474 }
3475 
3476 /* offset and length are dword aligned */
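/* Buffered flash accepts word-at-a-time programming: NVRAM_CMD_FIRST
 * opens a page program cycle and NVRAM_CMD_LAST commits it, so those
 * bits are raised on page boundaries and on the final word of the
 * transfer.
 */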
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 		u8 *buf)
3479 {
3480 	int i, ret = 0;
3481 
3482 	for (i = 0; i < len; i += 4, offset += 4) {
3483 		u32 page_off, phy_addr, nvram_cmd;
3484 		__be32 data;
3485 
3486 		memcpy(&data, buf + i, 4);
3487 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3488 
3489 		page_off = offset % tp->nvram_pagesize;
3490 
3491 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3492 
3493 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3494 
3495 		if (page_off == 0 || i == 0)
3496 			nvram_cmd |= NVRAM_CMD_FIRST;
3497 		if (page_off == (tp->nvram_pagesize - 4))
3498 			nvram_cmd |= NVRAM_CMD_LAST;
3499 
3500 		if (i == (len - 4))
3501 			nvram_cmd |= NVRAM_CMD_LAST;
3502 
3503 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 		    !tg3_flag(tp, FLASH) ||
3505 		    !tg3_flag(tp, 57765_PLUS))
3506 			tw32(NVRAM_ADDR, phy_addr);
3507 
3508 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 		    !tg3_flag(tp, 5755_PLUS) &&
3510 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3511 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3512 			u32 cmd;
3513 
3514 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 			ret = tg3_nvram_exec_cmd(tp, cmd);
3516 			if (ret)
3517 				break;
3518 		}
3519 		if (!tg3_flag(tp, FLASH)) {
3520 			/* We always do complete word writes to eeprom. */
3521 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3522 		}
3523 
3524 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 		if (ret)
3526 			break;
3527 	}
3528 	return ret;
3529 }
3530 
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3533 {
3534 	int ret;
3535 
3536 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539 		udelay(40);
3540 	}
3541 
3542 	if (!tg3_flag(tp, NVRAM)) {
3543 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544 	} else {
3545 		u32 grc_mode;
3546 
3547 		ret = tg3_nvram_lock(tp);
3548 		if (ret)
3549 			return ret;
3550 
3551 		tg3_enable_nvram_access(tp);
3552 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 			tw32(NVRAM_WRITE1, 0x406);
3554 
3555 		grc_mode = tr32(GRC_MODE);
3556 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3557 
3558 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560 				buf);
3561 		} else {
3562 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563 				buf);
3564 		}
3565 
3566 		grc_mode = tr32(GRC_MODE);
3567 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3568 
3569 		tg3_disable_nvram_access(tp);
3570 		tg3_nvram_unlock(tp);
3571 	}
3572 
3573 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 		udelay(40);
3576 	}
3577 
3578 	return ret;
3579 }
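
/* Editor's note -- the bracketing used by the NVRAM branch above, shown
 * as a call-ordering sketch (illustrative, not new code):
 *
 *	tg3_nvram_lock(tp);                      arbitrate with bootcode
 *	tg3_enable_nvram_access(tp);
 *	GRC_MODE |= GRC_MODE_NVRAM_WR_ENABLE;
 *	    ...buffered or unbuffered block write...
 *	GRC_MODE &= ~GRC_MODE_NVRAM_WR_ENABLE;
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * An error from the inner write is held in ret, so the teardown still
 * runs before the lock is released.
 */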
3580 
3581 #define RX_CPU_SCRATCH_BASE	0x30000
3582 #define RX_CPU_SCRATCH_SIZE	0x04000
3583 #define TX_CPU_SCRATCH_BASE	0x34000
3584 #define TX_CPU_SCRATCH_SIZE	0x04000
3585 
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3588 {
3589 	int i;
3590 	const int iters = 10000;
3591 
3592 	for (i = 0; i < iters; i++) {
3593 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3595 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596 			break;
3597 		if (pci_channel_offline(tp->pdev))
3598 			return -EBUSY;
3599 	}
3600 
3601 	return (i == iters) ? -EBUSY : 0;
3602 }
3603 
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3606 {
3607 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3608 
3609 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3611 	udelay(10);
3612 
3613 	return rc;
3614 }
3615 
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3618 {
3619 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3627 }
3628 
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3631 {
3632 	tg3_resume_cpu(tp, RX_CPU_BASE);
3633 }
3634 
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3637 {
3638 	int rc;
3639 
3640 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3641 
3642 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3644 
3645 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646 		return 0;
3647 	}
3648 	if (cpu_base == RX_CPU_BASE) {
3649 		rc = tg3_rxcpu_pause(tp);
3650 	} else {
3651 		/*
3652 		 * There is only an Rx CPU for the 5750 derivative in the
3653 		 * BCM4785.
3654 		 */
3655 		if (tg3_flag(tp, IS_SSB_CORE))
3656 			return 0;
3657 
3658 		rc = tg3_txcpu_pause(tp);
3659 	}
3660 
3661 	if (rc) {
3662 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664 		return -ENODEV;
3665 	}
3666 
3667 	/* Clear firmware's nvram arbitration. */
3668 	if (tg3_flag(tp, NVRAM))
3669 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670 	return 0;
3671 }
3672 
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 			   const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676 	int fw_len;
3677 
3678 	/* Non-fragmented firmware has one firmware header followed by a
3679 	 * contiguous chunk of data to be written. The length field in that
3680 	 * header is not the length of the data to be written but the
3681 	 * complete length of the BSS. The data length is determined from
3682 	 * tp->fw->size minus the headers.
3683 	 *
3684 	 * Fragmented firmware has a main header followed by multiple
3685 	 * fragments. Each fragment is identical to non-fragmented firmware:
3686 	 * a firmware header followed by a contiguous chunk of data. In
3687 	 * the main header, the length field is unused and set to 0xffffffff.
3688 	 * In each fragment header the length is the entire size of that
3689 	 * fragment, i.e. fragment data + header length. The data length is
3690 	 * therefore the header's length field minus TG3_FW_HDR_LEN.
3691 	 */
3692 	if (tp->fw_len == 0xffffffff)
3693 		fw_len = be32_to_cpu(fw_hdr->len);
3694 	else
3695 		fw_len = tp->fw->size;
3696 
3697 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3698 }
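
/* Editor's note -- worked arithmetic for the rule above, assuming
 * TG3_FW_HDR_LEN == 12 (three 32-bit header words: version, base_addr,
 * len) and an assumed fragment header reporting len = 0x40c:
 *
 *	(0x40c - 12) / sizeof(u32) = 0x400 / 4 = 256 dwords
 *
 * For a non-fragmented image the same division is applied to
 * tp->fw->size instead of the header's len field.
 */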
3699 
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 				 u32 cpu_scratch_base, int cpu_scratch_size,
3703 				 const struct tg3_firmware_hdr *fw_hdr)
3704 {
3705 	int err, i;
3706 	void (*write_op)(struct tg3 *, u32, u32);
3707 	int total_len = tp->fw->size;
3708 
3709 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710 		netdev_err(tp->dev,
3711 			   "%s: trying to load TX CPU firmware on a 5705-class device\n",
3712 			   __func__);
3713 		return -EINVAL;
3714 	}
3715 
3716 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 		write_op = tg3_write_mem;
3718 	else
3719 		write_op = tg3_write_indirect_reg32;
3720 
3721 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 		/* The bootcode may still be loading at this point.
3723 		 * Acquire the NVRAM lock before halting the CPU.
3724 		 */
3725 		int lock_err = tg3_nvram_lock(tp);
3726 		err = tg3_halt_cpu(tp, cpu_base);
3727 		if (!lock_err)
3728 			tg3_nvram_unlock(tp);
3729 		if (err)
3730 			goto out;
3731 
3732 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 			write_op(tp, cpu_scratch_base + i, 0);
3734 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 		tw32(cpu_base + CPU_MODE,
3736 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737 	} else {
3738 		/* Subtract additional main header for fragmented firmware and
3739 		 * advance to the first fragment
3740 		 */
3741 		total_len -= TG3_FW_HDR_LEN;
3742 		fw_hdr++;
3743 	}
3744 
3745 	do {
3746 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 			write_op(tp, cpu_scratch_base +
3749 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750 				     (i * sizeof(u32)),
3751 				 be32_to_cpu(fw_data[i]));
3752 
3753 		total_len -= be32_to_cpu(fw_hdr->len);
3754 
3755 		/* Advance to next fragment */
3756 		fw_hdr = (struct tg3_firmware_hdr *)
3757 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 	} while (total_len > 0);
3759 
3760 	err = 0;
3761 
3762 out:
3763 	return err;
3764 }
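
/* Editor's note -- assumed in-memory layout for the fragmented case
 * walked by the loop above (57766-style blobs):
 *
 *	+-------------------+ <- tp->fw->data
 *	| main header       |    len = 0xffffffff (unused)
 *	+-------------------+ <- fw_hdr++ skips this
 *	| frag 1 hdr + data |    hdr len = header + data bytes
 *	+-------------------+ <- fw_hdr advances by frag 1's len
 *	| frag 2 hdr + data |
 *	+-------------------+    ...until total_len reaches 0
 */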
3765 
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3768 {
3769 	int i;
3770 	const int iters = 5;
3771 
3772 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 	tw32_f(cpu_base + CPU_PC, pc);
3774 
3775 	for (i = 0; i < iters; i++) {
3776 		if (tr32(cpu_base + CPU_PC) == pc)
3777 			break;
3778 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3780 		tw32_f(cpu_base + CPU_PC, pc);
3781 		udelay(1000);
3782 	}
3783 
3784 	return (i == iters) ? -EBUSY : 0;
3785 }
3786 
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3789 {
3790 	const struct tg3_firmware_hdr *fw_hdr;
3791 	int err;
3792 
3793 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3794 
3795 	/* The firmware blob starts with version numbers, followed by
3796 	   the start address and length. The length field covers the complete
3797 	   image: length = end_address_of_bss - start_address_of_text.
3798 	   The remainder is the blob to be loaded contiguously
3799 	   from the start address. */
3800 
3801 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 				    fw_hdr);
3804 	if (err)
3805 		return err;
3806 
3807 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 				    fw_hdr);
3810 	if (err)
3811 		return err;
3812 
3813 	/* Now startup only the RX cpu. */
3814 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 				       be32_to_cpu(fw_hdr->base_addr));
3816 	if (err) {
3817 		netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3818 			   "should be %08x\n", __func__,
3819 			   tr32(RX_CPU_BASE + CPU_PC),
3820 			   be32_to_cpu(fw_hdr->base_addr));
3821 		return -ENODEV;
3822 	}
3823 
3824 	tg3_rxcpu_resume(tp);
3825 
3826 	return 0;
3827 }
3828 
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3830 {
3831 	const int iters = 1000;
3832 	int i;
3833 	u32 val;
3834 
3835 	/* Wait for the boot code to complete initialization and enter its
3836 	 * service loop. It is then safe to download service patches.
3837 	 */
3838 	for (i = 0; i < iters; i++) {
3839 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 			break;
3841 
3842 		udelay(10);
3843 	}
3844 
3845 	if (i == iters) {
3846 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847 		return -EBUSY;
3848 	}
3849 
3850 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851 	if (val & 0xff) {
3852 		netdev_warn(tp->dev,
3853 			    "Other patches exist. Not downloading EEE patch\n");
3854 		return -EEXIST;
3855 	}
3856 
3857 	return 0;
3858 }
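
/* Editor's note -- illustrative arithmetic: the poll above waits 10 us
 * per iteration for up to 1000 iterations, i.e. a bound of roughly
 * 10 ms for the bootcode to reach its service loop.
 */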
3859 
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3862 {
3863 	struct tg3_firmware_hdr *fw_hdr;
3864 
3865 	if (!tg3_flag(tp, NO_NVRAM))
3866 		return;
3867 
3868 	if (tg3_validate_rxcpu_state(tp))
3869 		return;
3870 
3871 	if (!tp->fw)
3872 		return;
3873 
3874 	/* This firmware blob has a different format than older firmware
3875 	 * releases, as described below. The main difference is that the
3876 	 * data is fragmented and written to non-contiguous locations.
3877 	 *
3878 	 * The blob begins with a firmware header identical to other
3879 	 * firmware, consisting of version, base addr and length. The length
3880 	 * here is unused and set to 0xffffffff.
3881 	 *
3882 	 * This is followed by a series of firmware fragments which are
3883 	 * individually identical to previous firmware, i.e. a firmware
3884 	 * header followed by the data for that fragment. The version
3885 	 * field of the individual fragment header is unused.
3886 	 */
3887 
3888 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890 		return;
3891 
3892 	if (tg3_rxcpu_pause(tp))
3893 		return;
3894 
3895 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3897 
3898 	tg3_rxcpu_resume(tp);
3899 }
3900 
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3903 {
3904 	const struct tg3_firmware_hdr *fw_hdr;
3905 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906 	int err;
3907 
3908 	if (!tg3_flag(tp, FW_TSO))
3909 		return 0;
3910 
3911 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3912 
3913 	/* The firmware blob starts with version numbers, followed by
3914 	   the start address and length. The length field covers the complete
3915 	   image: length = end_address_of_bss - start_address_of_text.
3916 	   The remainder is the blob to be loaded contiguously
3917 	   from the start address. */
3918 
3919 	cpu_scratch_size = tp->fw_len;
3920 
3921 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 		cpu_base = RX_CPU_BASE;
3923 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924 	} else {
3925 		cpu_base = TX_CPU_BASE;
3926 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3928 	}
3929 
3930 	err = tg3_load_firmware_cpu(tp, cpu_base,
3931 				    cpu_scratch_base, cpu_scratch_size,
3932 				    fw_hdr);
3933 	if (err)
3934 		return err;
3935 
3936 	/* Now startup the cpu. */
3937 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 				       be32_to_cpu(fw_hdr->base_addr));
3939 	if (err) {
3940 		netdev_err(tp->dev,
3941 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3942 			   __func__, tr32(cpu_base + CPU_PC),
3943 			   be32_to_cpu(fw_hdr->base_addr));
3944 		return -ENODEV;
3945 	}
3946 
3947 	tg3_resume_cpu(tp, cpu_base);
3948 	return 0;
3949 }
3950 
3951 /* tp->lock is held. */
3952 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3953 {
3954 	u32 addr_high, addr_low;
3955 
3956 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3957 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3958 		    (mac_addr[4] <<  8) | mac_addr[5]);
3959 
3960 	if (index < 4) {
3961 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3962 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3963 	} else {
3964 		index -= 4;
3965 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3966 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3967 	}
3968 }
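
/* Editor's note -- a worked example of the packing above, for an
 * assumed mac_addr of 00:10:18:aa:bb:cc:
 *
 *	addr_high = 0x00000010;		bytes 0-1
 *	addr_low  = 0x18aabbcc;		bytes 2-5
 *
 * An index of 5 would be rewritten to 1 and land in the second
 * extended slot (MAC_EXTADDR_0_HIGH/LOW + 8).
 */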
3969 
3970 /* tp->lock is held. */
3971 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3972 {
3973 	u32 addr_high;
3974 	int i;
3975 
3976 	for (i = 0; i < 4; i++) {
3977 		if (i == 1 && skip_mac_1)
3978 			continue;
3979 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3980 	}
3981 
3982 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3983 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3984 		for (i = 4; i < 16; i++)
3985 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3986 	}
3987 
3988 	addr_high = (tp->dev->dev_addr[0] +
3989 		     tp->dev->dev_addr[1] +
3990 		     tp->dev->dev_addr[2] +
3991 		     tp->dev->dev_addr[3] +
3992 		     tp->dev->dev_addr[4] +
3993 		     tp->dev->dev_addr[5]) &
3994 		TX_BACKOFF_SEED_MASK;
3995 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3996 }
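
/* Editor's note -- illustrative arithmetic for the backoff seed above,
 * reusing the assumed address 00:10:18:aa:bb:cc:
 *
 *	0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259
 *
 * which, masked with TX_BACKOFF_SEED_MASK, seeds the transmit backoff
 * generator; presumably so that NICs with different addresses pick
 * different backoff slots after a collision.
 */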
3997 
3998 static void tg3_enable_register_access(struct tg3 *tp)
3999 {
4000 	/*
4001 	 * Make sure register accesses (indirect or otherwise) will function
4002 	 * correctly.
4003 	 */
4004 	pci_write_config_dword(tp->pdev,
4005 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4006 }
4007 
4008 static int tg3_power_up(struct tg3 *tp)
4009 {
4010 	int err;
4011 
4012 	tg3_enable_register_access(tp);
4013 
4014 	err = pci_set_power_state(tp->pdev, PCI_D0);
4015 	if (!err) {
4016 		/* Switch out of Vaux if it is a NIC */
4017 		tg3_pwrsrc_switch_to_vmain(tp);
4018 	} else {
4019 		netdev_err(tp->dev, "Transition to D0 failed\n");
4020 	}
4021 
4022 	return err;
4023 }
4024 
4025 static int tg3_setup_phy(struct tg3 *, bool);
4026 
4027 static int tg3_power_down_prepare(struct tg3 *tp)
4028 {
4029 	u32 misc_host_ctrl;
4030 	bool device_should_wake, do_low_power;
4031 
4032 	tg3_enable_register_access(tp);
4033 
4034 	/* Restore the CLKREQ setting. */
4035 	if (tg3_flag(tp, CLKREQ_BUG))
4036 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4037 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4038 
4039 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4040 	tw32(TG3PCI_MISC_HOST_CTRL,
4041 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4042 
4043 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4044 			     tg3_flag(tp, WOL_ENABLE);
4045 
4046 	if (tg3_flag(tp, USE_PHYLIB)) {
4047 		do_low_power = false;
4048 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4049 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4050 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4051 			struct phy_device *phydev;
4052 			u32 phyid;
4053 
4054 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4055 
4056 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4057 
4058 			tp->link_config.speed = phydev->speed;
4059 			tp->link_config.duplex = phydev->duplex;
4060 			tp->link_config.autoneg = phydev->autoneg;
4061 			ethtool_convert_link_mode_to_legacy_u32(
4062 				&tp->link_config.advertising,
4063 				phydev->advertising);
4064 
4065 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4066 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4067 					 advertising);
4068 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4069 					 advertising);
4070 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4071 					 advertising);
4072 
4073 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4074 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4075 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4076 							 advertising);
4077 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4078 							 advertising);
4079 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4080 							 advertising);
4081 				} else {
4082 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4083 							 advertising);
4084 				}
4085 			}
4086 
4087 			linkmode_copy(phydev->advertising, advertising);
4088 			phy_start_aneg(phydev);
4089 
4090 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4091 			if (phyid != PHY_ID_BCMAC131) {
4092 				phyid &= PHY_BCM_OUI_MASK;
4093 				if (phyid == PHY_BCM_OUI_1 ||
4094 				    phyid == PHY_BCM_OUI_2 ||
4095 				    phyid == PHY_BCM_OUI_3)
4096 					do_low_power = true;
4097 			}
4098 		}
4099 	} else {
4100 		do_low_power = true;
4101 
4102 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4103 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4104 
4105 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4106 			tg3_setup_phy(tp, false);
4107 	}
4108 
4109 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4110 		u32 val;
4111 
4112 		val = tr32(GRC_VCPU_EXT_CTRL);
4113 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4114 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4115 		int i;
4116 		u32 val;
4117 
4118 		for (i = 0; i < 200; i++) {
4119 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4120 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4121 				break;
4122 			msleep(1);
4123 		}
4124 	}
4125 	if (tg3_flag(tp, WOL_CAP))
4126 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4127 						     WOL_DRV_STATE_SHUTDOWN |
4128 						     WOL_DRV_WOL |
4129 						     WOL_SET_MAGIC_PKT);
4130 
4131 	if (device_should_wake) {
4132 		u32 mac_mode;
4133 
4134 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4135 			if (do_low_power &&
4136 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4137 				tg3_phy_auxctl_write(tp,
4138 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4139 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4140 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4141 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4142 				udelay(40);
4143 			}
4144 
4145 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4146 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 			else if (tp->phy_flags &
4148 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4149 				if (tp->link_config.active_speed == SPEED_1000)
4150 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4151 				else
4152 					mac_mode = MAC_MODE_PORT_MODE_MII;
4153 			} else
4154 				mac_mode = MAC_MODE_PORT_MODE_MII;
4155 
4156 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4157 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4158 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4159 					     SPEED_100 : SPEED_10;
4160 				if (tg3_5700_link_polarity(tp, speed))
4161 					mac_mode |= MAC_MODE_LINK_POLARITY;
4162 				else
4163 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4164 			}
4165 		} else {
4166 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4167 		}
4168 
4169 		if (!tg3_flag(tp, 5750_PLUS))
4170 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4171 
4172 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4173 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4174 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4175 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4176 
4177 		if (tg3_flag(tp, ENABLE_APE))
4178 			mac_mode |= MAC_MODE_APE_TX_EN |
4179 				    MAC_MODE_APE_RX_EN |
4180 				    MAC_MODE_TDE_ENABLE;
4181 
4182 		tw32_f(MAC_MODE, mac_mode);
4183 		udelay(100);
4184 
4185 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4186 		udelay(10);
4187 	}
4188 
4189 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4190 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4191 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4192 		u32 base_val;
4193 
4194 		base_val = tp->pci_clock_ctrl;
4195 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4196 			     CLOCK_CTRL_TXCLK_DISABLE);
4197 
4198 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4199 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4200 	} else if (tg3_flag(tp, 5780_CLASS) ||
4201 		   tg3_flag(tp, CPMU_PRESENT) ||
4202 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4203 		/* do nothing */
4204 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4205 		u32 newbits1, newbits2;
4206 
4207 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4208 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4209 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4210 				    CLOCK_CTRL_TXCLK_DISABLE |
4211 				    CLOCK_CTRL_ALTCLK);
4212 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213 		} else if (tg3_flag(tp, 5705_PLUS)) {
4214 			newbits1 = CLOCK_CTRL_625_CORE;
4215 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4216 		} else {
4217 			newbits1 = CLOCK_CTRL_ALTCLK;
4218 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219 		}
4220 
4221 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4222 			    40);
4223 
4224 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4225 			    40);
4226 
4227 		if (!tg3_flag(tp, 5705_PLUS)) {
4228 			u32 newbits3;
4229 
4230 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4231 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4232 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4233 					    CLOCK_CTRL_TXCLK_DISABLE |
4234 					    CLOCK_CTRL_44MHZ_CORE);
4235 			} else {
4236 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4237 			}
4238 
4239 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4240 				    tp->pci_clock_ctrl | newbits3, 40);
4241 		}
4242 	}
4243 
4244 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4245 		tg3_power_down_phy(tp, do_low_power);
4246 
4247 	tg3_frob_aux_power(tp, true);
4248 
4249 	/* Workaround for unstable PLL clock */
4250 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4251 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4252 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4253 		u32 val = tr32(0x7d00);
4254 
4255 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4256 		tw32(0x7d00, val);
4257 		if (!tg3_flag(tp, ENABLE_ASF)) {
4258 			int err;
4259 
4260 			err = tg3_nvram_lock(tp);
4261 			tg3_halt_cpu(tp, RX_CPU_BASE);
4262 			if (!err)
4263 				tg3_nvram_unlock(tp);
4264 		}
4265 	}
4266 
4267 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4268 
4269 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4270 
4271 	return 0;
4272 }
4273 
4274 static void tg3_power_down(struct tg3 *tp)
4275 {
4276 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4277 	pci_set_power_state(tp->pdev, PCI_D3hot);
4278 }
4279 
4280 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4281 {
4282 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4283 	case MII_TG3_AUX_STAT_10HALF:
4284 		*speed = SPEED_10;
4285 		*duplex = DUPLEX_HALF;
4286 		break;
4287 
4288 	case MII_TG3_AUX_STAT_10FULL:
4289 		*speed = SPEED_10;
4290 		*duplex = DUPLEX_FULL;
4291 		break;
4292 
4293 	case MII_TG3_AUX_STAT_100HALF:
4294 		*speed = SPEED_100;
4295 		*duplex = DUPLEX_HALF;
4296 		break;
4297 
4298 	case MII_TG3_AUX_STAT_100FULL:
4299 		*speed = SPEED_100;
4300 		*duplex = DUPLEX_FULL;
4301 		break;
4302 
4303 	case MII_TG3_AUX_STAT_1000HALF:
4304 		*speed = SPEED_1000;
4305 		*duplex = DUPLEX_HALF;
4306 		break;
4307 
4308 	case MII_TG3_AUX_STAT_1000FULL:
4309 		*speed = SPEED_1000;
4310 		*duplex = DUPLEX_FULL;
4311 		break;
4312 
4313 	default:
4314 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4315 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4316 				 SPEED_10;
4317 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4318 				  DUPLEX_HALF;
4319 			break;
4320 		}
4321 		*speed = SPEED_UNKNOWN;
4322 		*duplex = DUPLEX_UNKNOWN;
4323 		break;
4324 	}
4325 }
4326 
4327 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4328 {
4329 	int err = 0;
4330 	u32 val, new_adv;
4331 
4332 	new_adv = ADVERTISE_CSMA;
4333 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4334 	new_adv |= mii_advertise_flowctrl(flowctrl);
4335 
4336 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4337 	if (err)
4338 		goto done;
4339 
4340 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4341 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4342 
4343 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4344 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4345 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4346 
4347 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4348 		if (err)
4349 			goto done;
4350 	}
4351 
4352 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4353 		goto done;
4354 
4355 	tw32(TG3_CPMU_EEE_MODE,
4356 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4357 
4358 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4359 	if (!err) {
4360 		u32 err2;
4361 
4362 		val = 0;
4363 		/* Advertise 100-BaseTX EEE ability */
4364 		if (advertise & ADVERTISED_100baseT_Full)
4365 			val |= MDIO_AN_EEE_ADV_100TX;
4366 		/* Advertise 1000-BaseT EEE ability */
4367 		if (advertise & ADVERTISED_1000baseT_Full)
4368 			val |= MDIO_AN_EEE_ADV_1000T;
4369 
4370 		if (!tp->eee.eee_enabled) {
4371 			val = 0;
4372 			tp->eee.advertised = 0;
4373 		} else {
4374 			tp->eee.advertised = advertise &
4375 					     (ADVERTISED_100baseT_Full |
4376 					      ADVERTISED_1000baseT_Full);
4377 		}
4378 
4379 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4380 		if (err)
4381 			val = 0;
4382 
4383 		switch (tg3_asic_rev(tp)) {
4384 		case ASIC_REV_5717:
4385 		case ASIC_REV_57765:
4386 		case ASIC_REV_57766:
4387 		case ASIC_REV_5719:
4388 			/* If we advertised any EEE abilities above... */
4389 			if (val)
4390 				val = MII_TG3_DSP_TAP26_ALNOKO |
4391 				      MII_TG3_DSP_TAP26_RMRXSTO |
4392 				      MII_TG3_DSP_TAP26_OPCSINPT;
4393 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4394 			fallthrough;
4395 		case ASIC_REV_5720:
4396 		case ASIC_REV_5762:
4397 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4398 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4399 						 MII_TG3_DSP_CH34TP2_HIBW01);
4400 		}
4401 
4402 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4403 		if (!err)
4404 			err = err2;
4405 	}
4406 
4407 done:
4408 	return err;
4409 }
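
/* Editor's note -- a worked encoding for the MII_ADVERTISE write above
 * (bit values from linux/mii.h; inputs assumed). Advertising all
 * 10/100 modes with symmetric flow control gives:
 *
 *	new_adv = ADVERTISE_CSMA	(0x0001)
 *		| ADVERTISE_ALL		(0x01e0)
 *		| ADVERTISE_PAUSE_CAP	(0x0400)
 *		= 0x05e1
 *
 * 1000BASE-T ability is encoded separately into MII_CTRL1000.
 */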
4410 
4411 static void tg3_phy_copper_begin(struct tg3 *tp)
4412 {
4413 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4414 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4415 		u32 adv, fc;
4416 
4417 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4418 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4419 			adv = ADVERTISED_10baseT_Half |
4420 			      ADVERTISED_10baseT_Full;
4421 			if (tg3_flag(tp, WOL_SPEED_100MB))
4422 				adv |= ADVERTISED_100baseT_Half |
4423 				       ADVERTISED_100baseT_Full;
4424 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4425 				if (!(tp->phy_flags &
4426 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4427 					adv |= ADVERTISED_1000baseT_Half;
4428 				adv |= ADVERTISED_1000baseT_Full;
4429 			}
4430 
4431 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4432 		} else {
4433 			adv = tp->link_config.advertising;
4434 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4435 				adv &= ~(ADVERTISED_1000baseT_Half |
4436 					 ADVERTISED_1000baseT_Full);
4437 
4438 			fc = tp->link_config.flowctrl;
4439 		}
4440 
4441 		tg3_phy_autoneg_cfg(tp, adv, fc);
4442 
4443 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4444 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4445 			/* Normally during power down we want to autonegotiate
4446 			 * the lowest possible speed for WOL. However, to avoid
4447 			 * link flap, we leave it untouched.
4448 			 */
4449 			return;
4450 		}
4451 
4452 		tg3_writephy(tp, MII_BMCR,
4453 			     BMCR_ANENABLE | BMCR_ANRESTART);
4454 	} else {
4455 		int i;
4456 		u32 bmcr, orig_bmcr;
4457 
4458 		tp->link_config.active_speed = tp->link_config.speed;
4459 		tp->link_config.active_duplex = tp->link_config.duplex;
4460 
4461 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4462 			/* With autoneg disabled, 5715 only links up when the
4463 			 * advertisement register has the configured speed
4464 			 * enabled.
4465 			 */
4466 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4467 		}
4468 
4469 		bmcr = 0;
4470 		switch (tp->link_config.speed) {
4471 		default:
4472 		case SPEED_10:
4473 			break;
4474 
4475 		case SPEED_100:
4476 			bmcr |= BMCR_SPEED100;
4477 			break;
4478 
4479 		case SPEED_1000:
4480 			bmcr |= BMCR_SPEED1000;
4481 			break;
4482 		}
4483 
4484 		if (tp->link_config.duplex == DUPLEX_FULL)
4485 			bmcr |= BMCR_FULLDPLX;
4486 
4487 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4488 		    (bmcr != orig_bmcr)) {
4489 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4490 			for (i = 0; i < 1500; i++) {
4491 				u32 tmp;
4492 
4493 				udelay(10);
4494 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4495 				    tg3_readphy(tp, MII_BMSR, &tmp))
4496 					continue;
4497 				if (!(tmp & BMSR_LSTATUS)) {
4498 					udelay(40);
4499 					break;
4500 				}
4501 			}
4502 			tg3_writephy(tp, MII_BMCR, bmcr);
4503 			udelay(40);
4504 		}
4505 	}
4506 }
4507 
4508 static int tg3_phy_pull_config(struct tg3 *tp)
4509 {
4510 	int err;
4511 	u32 val;
4512 
4513 	err = tg3_readphy(tp, MII_BMCR, &val);
4514 	if (err)
4515 		goto done;
4516 
4517 	if (!(val & BMCR_ANENABLE)) {
4518 		tp->link_config.autoneg = AUTONEG_DISABLE;
4519 		tp->link_config.advertising = 0;
4520 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4521 
4522 		err = -EIO;
4523 
4524 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4525 		case 0:
4526 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527 				goto done;
4528 
4529 			tp->link_config.speed = SPEED_10;
4530 			break;
4531 		case BMCR_SPEED100:
4532 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4533 				goto done;
4534 
4535 			tp->link_config.speed = SPEED_100;
4536 			break;
4537 		case BMCR_SPEED1000:
4538 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4539 				tp->link_config.speed = SPEED_1000;
4540 				break;
4541 			}
4542 			fallthrough;
4543 		default:
4544 			goto done;
4545 		}
4546 
4547 		if (val & BMCR_FULLDPLX)
4548 			tp->link_config.duplex = DUPLEX_FULL;
4549 		else
4550 			tp->link_config.duplex = DUPLEX_HALF;
4551 
4552 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4553 
4554 		err = 0;
4555 		goto done;
4556 	}
4557 
4558 	tp->link_config.autoneg = AUTONEG_ENABLE;
4559 	tp->link_config.advertising = ADVERTISED_Autoneg;
4560 	tg3_flag_set(tp, PAUSE_AUTONEG);
4561 
4562 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 		u32 adv;
4564 
4565 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4566 		if (err)
4567 			goto done;
4568 
4569 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4570 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4571 
4572 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4573 	} else {
4574 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4575 	}
4576 
4577 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4578 		u32 adv;
4579 
4580 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4581 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4582 			if (err)
4583 				goto done;
4584 
4585 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4586 		} else {
4587 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4588 			if (err)
4589 				goto done;
4590 
4591 			adv = tg3_decode_flowctrl_1000X(val);
4592 			tp->link_config.flowctrl = adv;
4593 
4594 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4595 			adv = mii_adv_to_ethtool_adv_x(val);
4596 		}
4597 
4598 		tp->link_config.advertising |= adv;
4599 	}
4600 
4601 done:
4602 	return err;
4603 }
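
/* Editor's note -- a worked decode for the forced-mode path above
 * (BMCR value assumed; bit values from linux/mii.h). val = 0x2100
 * has BMCR_ANENABLE (0x1000) clear, BMCR_SPEED100 (0x2000) set and
 * BMCR_FULLDPLX (0x0100) set, so the pulled config becomes
 * SPEED_100 / DUPLEX_FULL with autoneg disabled.
 */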
4604 
4605 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4606 {
4607 	int err;
4608 
4609 	/* Turn off tap power management. */
4610 	/* Set Extended packet length bit */
4611 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4612 
4613 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4614 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4615 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4616 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4617 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4618 
4619 	udelay(40);
4620 
4621 	return err;
4622 }
4623 
4624 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4625 {
4626 	struct ethtool_eee eee;
4627 
4628 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4629 		return true;
4630 
4631 	tg3_eee_pull_config(tp, &eee);
4632 
4633 	if (tp->eee.eee_enabled) {
4634 		if (tp->eee.advertised != eee.advertised ||
4635 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4636 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4637 			return false;
4638 	} else {
4639 		/* EEE is disabled but we're advertising */
4640 		if (eee.advertised)
4641 			return false;
4642 	}
4643 
4644 	return true;
4645 }
4646 
4647 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4648 {
4649 	u32 advmsk, tgtadv, advertising;
4650 
4651 	advertising = tp->link_config.advertising;
4652 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4653 
4654 	advmsk = ADVERTISE_ALL;
4655 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4656 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4657 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4658 	}
4659 
4660 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4661 		return false;
4662 
4663 	if ((*lcladv & advmsk) != tgtadv)
4664 		return false;
4665 
4666 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4667 		u32 tg3_ctrl;
4668 
4669 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4670 
4671 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4672 			return false;
4673 
4674 		if (tgtadv &&
4675 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4676 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4677 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4678 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4679 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4680 		} else {
4681 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4682 		}
4683 
4684 		if (tg3_ctrl != tgtadv)
4685 			return false;
4686 	}
4687 
4688 	return true;
4689 }
4690 
4691 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4692 {
4693 	u32 lpeth = 0;
4694 
4695 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4696 		u32 val;
4697 
4698 		if (tg3_readphy(tp, MII_STAT1000, &val))
4699 			return false;
4700 
4701 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4702 	}
4703 
4704 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4705 		return false;
4706 
4707 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4708 	tp->link_config.rmt_adv = lpeth;
4709 
4710 	return true;
4711 }
4712 
4713 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4714 {
4715 	if (curr_link_up != tp->link_up) {
4716 		if (curr_link_up) {
4717 			netif_carrier_on(tp->dev);
4718 		} else {
4719 			netif_carrier_off(tp->dev);
4720 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4721 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4722 		}
4723 
4724 		tg3_link_report(tp);
4725 		return true;
4726 	}
4727 
4728 	return false;
4729 }
4730 
4731 static void tg3_clear_mac_status(struct tg3 *tp)
4732 {
4733 	tw32(MAC_EVENT, 0);
4734 
4735 	tw32_f(MAC_STATUS,
4736 	       MAC_STATUS_SYNC_CHANGED |
4737 	       MAC_STATUS_CFG_CHANGED |
4738 	       MAC_STATUS_MI_COMPLETION |
4739 	       MAC_STATUS_LNKSTATE_CHANGED);
4740 	udelay(40);
4741 }
4742 
4743 static void tg3_setup_eee(struct tg3 *tp)
4744 {
4745 	u32 val;
4746 
4747 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4748 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4749 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4750 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4751 
4752 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4753 
4754 	tw32_f(TG3_CPMU_EEE_CTRL,
4755 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4756 
4757 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4758 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4759 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4760 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4761 
4762 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4763 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4764 
4765 	if (tg3_flag(tp, ENABLE_APE))
4766 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4767 
4768 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4769 
4770 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4771 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4772 	       (tp->eee.tx_lpi_timer & 0xffff));
4773 
4774 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4775 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4776 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4777 }
4778 
4779 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4780 {
4781 	bool current_link_up;
4782 	u32 bmsr, val;
4783 	u32 lcl_adv, rmt_adv;
4784 	u32 current_speed;
4785 	u8 current_duplex;
4786 	int i, err;
4787 
4788 	tg3_clear_mac_status(tp);
4789 
4790 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4791 		tw32_f(MAC_MI_MODE,
4792 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4793 		udelay(80);
4794 	}
4795 
4796 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4797 
4798 	/* Some third-party PHYs need to be reset when the link goes
4799 	 * down.
4800 	 */
4801 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4802 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4803 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4804 	    tp->link_up) {
4805 		tg3_readphy(tp, MII_BMSR, &bmsr);
4806 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4807 		    !(bmsr & BMSR_LSTATUS))
4808 			force_reset = true;
4809 	}
4810 	if (force_reset)
4811 		tg3_phy_reset(tp);
4812 
4813 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4814 		tg3_readphy(tp, MII_BMSR, &bmsr);
4815 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4816 		    !tg3_flag(tp, INIT_COMPLETE))
4817 			bmsr = 0;
4818 
4819 		if (!(bmsr & BMSR_LSTATUS)) {
4820 			err = tg3_init_5401phy_dsp(tp);
4821 			if (err)
4822 				return err;
4823 
4824 			tg3_readphy(tp, MII_BMSR, &bmsr);
4825 			for (i = 0; i < 1000; i++) {
4826 				udelay(10);
4827 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4828 				    (bmsr & BMSR_LSTATUS)) {
4829 					udelay(40);
4830 					break;
4831 				}
4832 			}
4833 
4834 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4835 			    TG3_PHY_REV_BCM5401_B0 &&
4836 			    !(bmsr & BMSR_LSTATUS) &&
4837 			    tp->link_config.active_speed == SPEED_1000) {
4838 				err = tg3_phy_reset(tp);
4839 				if (!err)
4840 					err = tg3_init_5401phy_dsp(tp);
4841 				if (err)
4842 					return err;
4843 			}
4844 		}
4845 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4846 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4847 		/* 5701 {A0,B0} CRC bug workaround */
4848 		tg3_writephy(tp, 0x15, 0x0a75);
4849 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4850 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4851 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 	}
4853 
4854 	/* Clear pending interrupts... */
4855 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4857 
4858 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4859 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4860 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4861 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4862 
4863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4865 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4866 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4867 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4868 		else
4869 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4870 	}
4871 
4872 	current_link_up = false;
4873 	current_speed = SPEED_UNKNOWN;
4874 	current_duplex = DUPLEX_UNKNOWN;
4875 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4876 	tp->link_config.rmt_adv = 0;
4877 
4878 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4879 		err = tg3_phy_auxctl_read(tp,
4880 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4881 					  &val);
4882 		if (!err && !(val & (1 << 10))) {
4883 			tg3_phy_auxctl_write(tp,
4884 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4885 					     val | (1 << 10));
4886 			goto relink;
4887 		}
4888 	}
4889 
4890 	bmsr = 0;
4891 	for (i = 0; i < 100; i++) {
4892 		tg3_readphy(tp, MII_BMSR, &bmsr);
4893 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4894 		    (bmsr & BMSR_LSTATUS))
4895 			break;
4896 		udelay(40);
4897 	}
4898 
4899 	if (bmsr & BMSR_LSTATUS) {
4900 		u32 aux_stat, bmcr;
4901 
4902 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4903 		for (i = 0; i < 2000; i++) {
4904 			udelay(10);
4905 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4906 			    aux_stat)
4907 				break;
4908 		}
4909 
4910 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4911 					     &current_speed,
4912 					     &current_duplex);
4913 
4914 		bmcr = 0;
4915 		for (i = 0; i < 200; i++) {
4916 			tg3_readphy(tp, MII_BMCR, &bmcr);
4917 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4918 				continue;
4919 			if (bmcr && bmcr != 0x7fff)
4920 				break;
4921 			udelay(10);
4922 		}
4923 
4924 		lcl_adv = 0;
4925 		rmt_adv = 0;
4926 
4927 		tp->link_config.active_speed = current_speed;
4928 		tp->link_config.active_duplex = current_duplex;
4929 
4930 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4931 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4932 
4933 			if ((bmcr & BMCR_ANENABLE) &&
4934 			    eee_config_ok &&
4935 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4936 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4937 				current_link_up = true;
4938 
4939 			/* EEE setting changes take effect only after a PHY
4940 			 * reset.  If we have skipped a reset due to Link Flap
4941 			 * Avoidance being enabled, do it now.
4942 			 */
4943 			if (!eee_config_ok &&
4944 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4945 			    !force_reset) {
4946 				tg3_setup_eee(tp);
4947 				tg3_phy_reset(tp);
4948 			}
4949 		} else {
4950 			if (!(bmcr & BMCR_ANENABLE) &&
4951 			    tp->link_config.speed == current_speed &&
4952 			    tp->link_config.duplex == current_duplex) {
4953 				current_link_up = true;
4954 			}
4955 		}
4956 
4957 		if (current_link_up &&
4958 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4959 			u32 reg, bit;
4960 
4961 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4962 				reg = MII_TG3_FET_GEN_STAT;
4963 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4964 			} else {
4965 				reg = MII_TG3_EXT_STAT;
4966 				bit = MII_TG3_EXT_STAT_MDIX;
4967 			}
4968 
4969 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4970 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4971 
4972 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4973 		}
4974 	}
4975 
4976 relink:
4977 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4978 		tg3_phy_copper_begin(tp);
4979 
4980 		if (tg3_flag(tp, ROBOSWITCH)) {
4981 			current_link_up = true;
4982 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4983 			current_speed = SPEED_1000;
4984 			current_duplex = DUPLEX_FULL;
4985 			tp->link_config.active_speed = current_speed;
4986 			tp->link_config.active_duplex = current_duplex;
4987 		}
4988 
4989 		tg3_readphy(tp, MII_BMSR, &bmsr);
4990 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4991 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4992 			current_link_up = true;
4993 	}
4994 
4995 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4996 	if (current_link_up) {
4997 		if (tp->link_config.active_speed == SPEED_100 ||
4998 		    tp->link_config.active_speed == SPEED_10)
4999 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 		else
5001 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5003 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5004 	else
5005 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5006 
5007 	/* In order for the 5750 core in the BCM4785 chip to work properly
5008 	 * in RGMII mode, the LED Control Register must be set up.
5009 	 */
5010 	if (tg3_flag(tp, RGMII_MODE)) {
5011 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5012 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5013 
5014 		if (tp->link_config.active_speed == SPEED_10)
5015 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5016 		else if (tp->link_config.active_speed == SPEED_100)
5017 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5018 				     LED_CTRL_100MBPS_ON);
5019 		else if (tp->link_config.active_speed == SPEED_1000)
5020 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5021 				     LED_CTRL_1000MBPS_ON);
5022 
5023 		tw32(MAC_LED_CTRL, led_ctrl);
5024 		udelay(40);
5025 	}
5026 
5027 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5028 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5029 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5030 
5031 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5032 		if (current_link_up &&
5033 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5034 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5035 		else
5036 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5037 	}
5038 
5039 	/* ??? Without this setting Netgear GA302T PHY does not
5040 	 * ??? send/receive packets...
5041 	 */
5042 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5043 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5044 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5045 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5046 		udelay(80);
5047 	}
5048 
5049 	tw32_f(MAC_MODE, tp->mac_mode);
5050 	udelay(40);
5051 
5052 	tg3_phy_eee_adjust(tp, current_link_up);
5053 
5054 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5055 		/* Polled via timer. */
5056 		tw32_f(MAC_EVENT, 0);
5057 	} else {
5058 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5059 	}
5060 	udelay(40);
5061 
5062 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5063 	    current_link_up &&
5064 	    tp->link_config.active_speed == SPEED_1000 &&
5065 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5066 		udelay(120);
5067 		tw32_f(MAC_STATUS,
5068 		     (MAC_STATUS_SYNC_CHANGED |
5069 		      MAC_STATUS_CFG_CHANGED));
5070 		udelay(40);
5071 		tg3_write_mem(tp,
5072 			      NIC_SRAM_FIRMWARE_MBOX,
5073 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5074 	}
5075 
5076 	/* Prevent send BD corruption. */
5077 	if (tg3_flag(tp, CLKREQ_BUG)) {
5078 		if (tp->link_config.active_speed == SPEED_100 ||
5079 		    tp->link_config.active_speed == SPEED_10)
5080 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5081 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5082 		else
5083 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5084 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5085 	}
5086 
5087 	tg3_test_and_report_link_chg(tp, current_link_up);
5088 
5089 	return 0;
5090 }
5091 
5092 struct tg3_fiber_aneginfo {
5093 	int state;
5094 #define ANEG_STATE_UNKNOWN		0
5095 #define ANEG_STATE_AN_ENABLE		1
5096 #define ANEG_STATE_RESTART_INIT		2
5097 #define ANEG_STATE_RESTART		3
5098 #define ANEG_STATE_DISABLE_LINK_OK	4
5099 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5100 #define ANEG_STATE_ABILITY_DETECT	6
5101 #define ANEG_STATE_ACK_DETECT_INIT	7
5102 #define ANEG_STATE_ACK_DETECT		8
5103 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5104 #define ANEG_STATE_COMPLETE_ACK		10
5105 #define ANEG_STATE_IDLE_DETECT_INIT	11
5106 #define ANEG_STATE_IDLE_DETECT		12
5107 #define ANEG_STATE_LINK_OK		13
5108 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5109 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5110 
5111 	u32 flags;
5112 #define MR_AN_ENABLE		0x00000001
5113 #define MR_RESTART_AN		0x00000002
5114 #define MR_AN_COMPLETE		0x00000004
5115 #define MR_PAGE_RX		0x00000008
5116 #define MR_NP_LOADED		0x00000010
5117 #define MR_TOGGLE_TX		0x00000020
5118 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5119 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5120 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5121 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5122 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5123 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5124 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5125 #define MR_TOGGLE_RX		0x00002000
5126 #define MR_NP_RX		0x00004000
5127 
5128 #define MR_LINK_OK		0x80000000
5129 
5130 	unsigned long link_time, cur_time;
5131 
5132 	u32 ability_match_cfg;
5133 	int ability_match_count;
5134 
5135 	char ability_match, idle_match, ack_match;
5136 
5137 	u32 txconfig, rxconfig;
5138 #define ANEG_CFG_NP		0x00000080
5139 #define ANEG_CFG_ACK		0x00000040
5140 #define ANEG_CFG_RF2		0x00000020
5141 #define ANEG_CFG_RF1		0x00000010
5142 #define ANEG_CFG_PS2		0x00000001
5143 #define ANEG_CFG_PS1		0x00008000
5144 #define ANEG_CFG_HD		0x00004000
5145 #define ANEG_CFG_FD		0x00002000
5146 #define ANEG_CFG_INVAL		0x00001f06
5147 
5148 };
5149 #define ANEG_OK		0
5150 #define ANEG_DONE	1
5151 #define ANEG_TIMER_ENAB	2
5152 #define ANEG_FAILED	-1
5153 
5154 #define ANEG_STATE_SETTLE_TIME	10000
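
/* Editor's note -- a worked decode using the ANEG_CFG_* bits above
 * (rxconfig value assumed): rxconfig = 0xa040 decomposes as
 *
 *	ANEG_CFG_PS1 (0x00008000)  symmetric pause
 *	ANEG_CFG_FD  (0x00002000)  link partner offers full duplex
 *	ANEG_CFG_ACK (0x00000040)  partner has seen our config word
 *
 * which the COMPLETE_ACK states below fold into ap->flags as
 * MR_LP_ADV_SYM_PAUSE | MR_LP_ADV_FULL_DUPLEX.
 */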
5155 
5156 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5157 				   struct tg3_fiber_aneginfo *ap)
5158 {
5159 	u16 flowctrl;
5160 	unsigned long delta;
5161 	u32 rx_cfg_reg;
5162 	int ret;
5163 
5164 	if (ap->state == ANEG_STATE_UNKNOWN) {
5165 		ap->rxconfig = 0;
5166 		ap->link_time = 0;
5167 		ap->cur_time = 0;
5168 		ap->ability_match_cfg = 0;
5169 		ap->ability_match_count = 0;
5170 		ap->ability_match = 0;
5171 		ap->idle_match = 0;
5172 		ap->ack_match = 0;
5173 	}
5174 	ap->cur_time++;
5175 
5176 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5177 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5178 
5179 		if (rx_cfg_reg != ap->ability_match_cfg) {
5180 			ap->ability_match_cfg = rx_cfg_reg;
5181 			ap->ability_match = 0;
5182 			ap->ability_match_count = 0;
5183 		} else {
5184 			if (++ap->ability_match_count > 1) {
5185 				ap->ability_match = 1;
5186 				ap->ability_match_cfg = rx_cfg_reg;
5187 			}
5188 		}
5189 		if (rx_cfg_reg & ANEG_CFG_ACK)
5190 			ap->ack_match = 1;
5191 		else
5192 			ap->ack_match = 0;
5193 
5194 		ap->idle_match = 0;
5195 	} else {
5196 		ap->idle_match = 1;
5197 		ap->ability_match_cfg = 0;
5198 		ap->ability_match_count = 0;
5199 		ap->ability_match = 0;
5200 		ap->ack_match = 0;
5201 
5202 		rx_cfg_reg = 0;
5203 	}
5204 
5205 	ap->rxconfig = rx_cfg_reg;
5206 	ret = ANEG_OK;
5207 
5208 	switch (ap->state) {
5209 	case ANEG_STATE_UNKNOWN:
5210 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5211 			ap->state = ANEG_STATE_AN_ENABLE;
5212 
5213 		fallthrough;
5214 	case ANEG_STATE_AN_ENABLE:
5215 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5216 		if (ap->flags & MR_AN_ENABLE) {
5217 			ap->link_time = 0;
5218 			ap->cur_time = 0;
5219 			ap->ability_match_cfg = 0;
5220 			ap->ability_match_count = 0;
5221 			ap->ability_match = 0;
5222 			ap->idle_match = 0;
5223 			ap->ack_match = 0;
5224 
5225 			ap->state = ANEG_STATE_RESTART_INIT;
5226 		} else {
5227 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5228 		}
5229 		break;
5230 
5231 	case ANEG_STATE_RESTART_INIT:
5232 		ap->link_time = ap->cur_time;
5233 		ap->flags &= ~(MR_NP_LOADED);
5234 		ap->txconfig = 0;
5235 		tw32(MAC_TX_AUTO_NEG, 0);
5236 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5237 		tw32_f(MAC_MODE, tp->mac_mode);
5238 		udelay(40);
5239 
5240 		ret = ANEG_TIMER_ENAB;
5241 		ap->state = ANEG_STATE_RESTART;
5242 
5243 		fallthrough;
5244 	case ANEG_STATE_RESTART:
5245 		delta = ap->cur_time - ap->link_time;
5246 		if (delta > ANEG_STATE_SETTLE_TIME)
5247 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5248 		else
5249 			ret = ANEG_TIMER_ENAB;
5250 		break;
5251 
5252 	case ANEG_STATE_DISABLE_LINK_OK:
5253 		ret = ANEG_DONE;
5254 		break;
5255 
5256 	case ANEG_STATE_ABILITY_DETECT_INIT:
5257 		ap->flags &= ~(MR_TOGGLE_TX);
5258 		ap->txconfig = ANEG_CFG_FD;
5259 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5260 		if (flowctrl & ADVERTISE_1000XPAUSE)
5261 			ap->txconfig |= ANEG_CFG_PS1;
5262 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5263 			ap->txconfig |= ANEG_CFG_PS2;
5264 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 		tw32_f(MAC_MODE, tp->mac_mode);
5267 		udelay(40);
5268 
5269 		ap->state = ANEG_STATE_ABILITY_DETECT;
5270 		break;
5271 
5272 	case ANEG_STATE_ABILITY_DETECT:
5273 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5274 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5275 		break;
5276 
5277 	case ANEG_STATE_ACK_DETECT_INIT:
5278 		ap->txconfig |= ANEG_CFG_ACK;
5279 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5280 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5281 		tw32_f(MAC_MODE, tp->mac_mode);
5282 		udelay(40);
5283 
5284 		ap->state = ANEG_STATE_ACK_DETECT;
5285 
5286 		fallthrough;
5287 	case ANEG_STATE_ACK_DETECT:
5288 		if (ap->ack_match != 0) {
5289 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5290 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5291 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5292 			} else {
5293 				ap->state = ANEG_STATE_AN_ENABLE;
5294 			}
5295 		} else if (ap->ability_match != 0 &&
5296 			   ap->rxconfig == 0) {
5297 			ap->state = ANEG_STATE_AN_ENABLE;
5298 		}
5299 		break;
5300 
5301 	case ANEG_STATE_COMPLETE_ACK_INIT:
5302 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5303 			ret = ANEG_FAILED;
5304 			break;
5305 		}
5306 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5307 			       MR_LP_ADV_HALF_DUPLEX |
5308 			       MR_LP_ADV_SYM_PAUSE |
5309 			       MR_LP_ADV_ASYM_PAUSE |
5310 			       MR_LP_ADV_REMOTE_FAULT1 |
5311 			       MR_LP_ADV_REMOTE_FAULT2 |
5312 			       MR_LP_ADV_NEXT_PAGE |
5313 			       MR_TOGGLE_RX |
5314 			       MR_NP_RX);
5315 		if (ap->rxconfig & ANEG_CFG_FD)
5316 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5317 		if (ap->rxconfig & ANEG_CFG_HD)
5318 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5319 		if (ap->rxconfig & ANEG_CFG_PS1)
5320 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5321 		if (ap->rxconfig & ANEG_CFG_PS2)
5322 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5323 		if (ap->rxconfig & ANEG_CFG_RF1)
5324 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5325 		if (ap->rxconfig & ANEG_CFG_RF2)
5326 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5327 		if (ap->rxconfig & ANEG_CFG_NP)
5328 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5329 
5330 		ap->link_time = ap->cur_time;
5331 
5332 		ap->flags ^= (MR_TOGGLE_TX);
5333 		if (ap->rxconfig & 0x0008)
5334 			ap->flags |= MR_TOGGLE_RX;
5335 		if (ap->rxconfig & ANEG_CFG_NP)
5336 			ap->flags |= MR_NP_RX;
5337 		ap->flags |= MR_PAGE_RX;
5338 
5339 		ap->state = ANEG_STATE_COMPLETE_ACK;
5340 		ret = ANEG_TIMER_ENAB;
5341 		break;
5342 
5343 	case ANEG_STATE_COMPLETE_ACK:
5344 		if (ap->ability_match != 0 &&
5345 		    ap->rxconfig == 0) {
5346 			ap->state = ANEG_STATE_AN_ENABLE;
5347 			break;
5348 		}
5349 		delta = ap->cur_time - ap->link_time;
5350 		if (delta > ANEG_STATE_SETTLE_TIME) {
5351 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5352 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5353 			} else {
5354 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5355 				    !(ap->flags & MR_NP_RX)) {
5356 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5357 				} else {
5358 					ret = ANEG_FAILED;
5359 				}
5360 			}
5361 		}
5362 		break;
5363 
5364 	case ANEG_STATE_IDLE_DETECT_INIT:
5365 		ap->link_time = ap->cur_time;
5366 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5367 		tw32_f(MAC_MODE, tp->mac_mode);
5368 		udelay(40);
5369 
5370 		ap->state = ANEG_STATE_IDLE_DETECT;
5371 		ret = ANEG_TIMER_ENAB;
5372 		break;
5373 
5374 	case ANEG_STATE_IDLE_DETECT:
5375 		if (ap->ability_match != 0 &&
5376 		    ap->rxconfig == 0) {
5377 			ap->state = ANEG_STATE_AN_ENABLE;
5378 			break;
5379 		}
5380 		delta = ap->cur_time - ap->link_time;
5381 		if (delta > ANEG_STATE_SETTLE_TIME) {
5382 			/* XXX another gem from the Broadcom driver :( */
5383 			ap->state = ANEG_STATE_LINK_OK;
5384 		}
5385 		break;
5386 
5387 	case ANEG_STATE_LINK_OK:
5388 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5389 		ret = ANEG_DONE;
5390 		break;
5391 
5392 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5393 		/* ??? unimplemented */
5394 		break;
5395 
5396 	case ANEG_STATE_NEXT_PAGE_WAIT:
5397 		/* ??? unimplemented */
5398 		break;
5399 
5400 	default:
5401 		ret = ANEG_FAILED;
5402 		break;
5403 	}
5404 
5405 	return ret;
5406 }
5407 
5408 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5409 {
5410 	int res = 0;
5411 	struct tg3_fiber_aneginfo aninfo;
5412 	int status = ANEG_FAILED;
5413 	unsigned int tick;
5414 	u32 tmp;
5415 
5416 	tw32_f(MAC_TX_AUTO_NEG, 0);
5417 
5418 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5419 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5420 	udelay(40);
5421 
5422 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5423 	udelay(40);
5424 
5425 	memset(&aninfo, 0, sizeof(aninfo));
5426 	aninfo.flags |= MR_AN_ENABLE;
5427 	aninfo.state = ANEG_STATE_UNKNOWN;
5428 	aninfo.cur_time = 0;
5429 	tick = 0;
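	/* Poll the software autoneg state machine in 1 us steps; the loop
	 * bounds this at 195000 iterations, i.e. roughly 195 ms.
	 */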
5430 	while (++tick < 195000) {
5431 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5432 		if (status == ANEG_DONE || status == ANEG_FAILED)
5433 			break;
5434 
5435 		udelay(1);
5436 	}
5437 
5438 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5439 	tw32_f(MAC_MODE, tp->mac_mode);
5440 	udelay(40);
5441 
5442 	*txflags = aninfo.txconfig;
5443 	*rxflags = aninfo.flags;
5444 
5445 	if (status == ANEG_DONE &&
5446 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5447 			     MR_LP_ADV_FULL_DUPLEX)))
5448 		res = 1;
5449 
5450 	return res;
5451 }
5452 
5453 static void tg3_init_bcm8002(struct tg3 *tp)
5454 {
5455 	u32 mac_status = tr32(MAC_STATUS);
5456 	int i;
5457 
5458 	/* Reset when initializing for the first time, or when we have a link. */
5459 	if (tg3_flag(tp, INIT_COMPLETE) &&
5460 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5461 		return;
5462 
5463 	/* Set PLL lock range. */
5464 	tg3_writephy(tp, 0x16, 0x8007);
5465 
5466 	/* SW reset */
5467 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5468 
5469 	/* Wait for reset to complete. */
5470 	/* XXX schedule_timeout() ... */
5471 	for (i = 0; i < 500; i++)
5472 		udelay(10);
5473 
5474 	/* Config mode; select PMA/Ch 1 regs. */
5475 	tg3_writephy(tp, 0x10, 0x8411);
5476 
5477 	/* Enable auto-lock and comdet, select txclk for tx. */
5478 	tg3_writephy(tp, 0x11, 0x0a10);
5479 
5480 	tg3_writephy(tp, 0x18, 0x00a0);
5481 	tg3_writephy(tp, 0x16, 0x41ff);
5482 
5483 	/* Assert and deassert POR. */
5484 	tg3_writephy(tp, 0x13, 0x0400);
5485 	udelay(40);
5486 	tg3_writephy(tp, 0x13, 0x0000);
5487 
5488 	tg3_writephy(tp, 0x11, 0x0a50);
5489 	udelay(40);
5490 	tg3_writephy(tp, 0x11, 0x0a10);
5491 
5492 	/* Wait for signal to stabilize */
5493 	/* XXX schedule_timeout() ... */
5494 	for (i = 0; i < 15000; i++)
5495 		udelay(10);
5496 
5497 	/* Deselect the channel register so we can read the PHYID
5498 	 * later.
5499 	 */
5500 	tg3_writephy(tp, 0x10, 0x8011);
5501 }
5502 
5503 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5504 {
5505 	u16 flowctrl;
5506 	bool current_link_up;
5507 	u32 sg_dig_ctrl, sg_dig_status;
5508 	u32 serdes_cfg, expected_sg_dig_ctrl;
5509 	int workaround, port_a;
5510 
5511 	serdes_cfg = 0;
5512 	expected_sg_dig_ctrl = 0;
5513 	workaround = 0;
5514 	port_a = 1;
5515 	current_link_up = false;
5516 
5517 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5518 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5519 		workaround = 1;
5520 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5521 			port_a = 0;
5522 
5523 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5524 		/* preserve bits 20-23 for voltage regulator */
5525 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5526 	}
5527 
5528 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5529 
5530 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5531 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5532 			if (workaround) {
5533 				u32 val = serdes_cfg;
5534 
5535 				if (port_a)
5536 					val |= 0xc010000;
5537 				else
5538 					val |= 0x4010000;
5539 				tw32_f(MAC_SERDES_CFG, val);
5540 			}
5541 
5542 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5543 		}
5544 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5545 			tg3_setup_flow_control(tp, 0, 0);
5546 			current_link_up = true;
5547 		}
5548 		goto out;
5549 	}
5550 
5551 	/* Want auto-negotiation.  */
5552 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5553 
5554 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5555 	if (flowctrl & ADVERTISE_1000XPAUSE)
5556 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5557 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5558 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5559 
5560 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5561 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5562 		    tp->serdes_counter &&
5563 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5564 				    MAC_STATUS_RCVD_CFG)) ==
5565 		     MAC_STATUS_PCS_SYNCED)) {
5566 			tp->serdes_counter--;
5567 			current_link_up = true;
5568 			goto out;
5569 		}
5570 restart_autoneg:
5571 		if (workaround)
5572 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5573 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5574 		udelay(5);
5575 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5576 
5577 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5578 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5579 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5580 				 MAC_STATUS_SIGNAL_DET)) {
5581 		sg_dig_status = tr32(SG_DIG_STATUS);
5582 		mac_status = tr32(MAC_STATUS);
5583 
5584 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5585 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5586 			u32 local_adv = 0, remote_adv = 0;
5587 
5588 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5589 				local_adv |= ADVERTISE_1000XPAUSE;
5590 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5591 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5592 
5593 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5594 				remote_adv |= LPA_1000XPAUSE;
5595 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5596 				remote_adv |= LPA_1000XPAUSE_ASYM;
5597 
5598 			tp->link_config.rmt_adv =
5599 					   mii_adv_to_ethtool_adv_x(remote_adv);
5600 
5601 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5602 			current_link_up = true;
5603 			tp->serdes_counter = 0;
5604 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5605 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5606 			if (tp->serdes_counter)
5607 				tp->serdes_counter--;
5608 			else {
5609 				if (workaround) {
5610 					u32 val = serdes_cfg;
5611 
5612 					if (port_a)
5613 						val |= 0xc010000;
5614 					else
5615 						val |= 0x4010000;
5616 
5617 					tw32_f(MAC_SERDES_CFG, val);
5618 				}
5619 
5620 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5621 				udelay(40);
5622 
5623 				/* Parallel link detection: the link is up
5624 				 * only if we have PCS sync and are not
5625 				 * receiving config code words. */
5626 				mac_status = tr32(MAC_STATUS);
5627 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5628 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5629 					tg3_setup_flow_control(tp, 0, 0);
5630 					current_link_up = true;
5631 					tp->phy_flags |=
5632 						TG3_PHYFLG_PARALLEL_DETECT;
5633 					tp->serdes_counter =
5634 						SERDES_PARALLEL_DET_TIMEOUT;
5635 				} else
5636 					goto restart_autoneg;
5637 			}
5638 		}
5639 	} else {
5640 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5641 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5642 	}
5643 
5644 out:
5645 	return current_link_up;
5646 }
5647 
5648 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5649 {
5650 	bool current_link_up = false;
5651 
5652 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5653 		goto out;
5654 
5655 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5656 		u32 txflags, rxflags;
5657 		int i;
5658 
5659 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5660 			u32 local_adv = 0, remote_adv = 0;
5661 
5662 			if (txflags & ANEG_CFG_PS1)
5663 				local_adv |= ADVERTISE_1000XPAUSE;
5664 			if (txflags & ANEG_CFG_PS2)
5665 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5666 
5667 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5668 				remote_adv |= LPA_1000XPAUSE;
5669 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5670 				remote_adv |= LPA_1000XPAUSE_ASYM;
5671 
5672 			tp->link_config.rmt_adv =
5673 					   mii_adv_to_ethtool_adv_x(remote_adv);
5674 
5675 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5676 
5677 			current_link_up = true;
5678 		}
5679 		for (i = 0; i < 30; i++) {
5680 			udelay(20);
5681 			tw32_f(MAC_STATUS,
5682 			       (MAC_STATUS_SYNC_CHANGED |
5683 				MAC_STATUS_CFG_CHANGED));
5684 			udelay(40);
5685 			if ((tr32(MAC_STATUS) &
5686 			     (MAC_STATUS_SYNC_CHANGED |
5687 			      MAC_STATUS_CFG_CHANGED)) == 0)
5688 				break;
5689 		}
5690 
5691 		mac_status = tr32(MAC_STATUS);
5692 		if (!current_link_up &&
5693 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5694 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5695 			current_link_up = true;
5696 	} else {
5697 		tg3_setup_flow_control(tp, 0, 0);
5698 
5699 		/* Forcing 1000FD link up. */
5700 		current_link_up = true;
5701 
5702 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5703 		udelay(40);
5704 
5705 		tw32_f(MAC_MODE, tp->mac_mode);
5706 		udelay(40);
5707 	}
5708 
5709 out:
5710 	return current_link_up;
5711 }
5712 
5713 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5714 {
5715 	u32 orig_pause_cfg;
5716 	u32 orig_active_speed;
5717 	u8 orig_active_duplex;
5718 	u32 mac_status;
5719 	bool current_link_up;
5720 	int i;
5721 
5722 	orig_pause_cfg = tp->link_config.active_flowctrl;
5723 	orig_active_speed = tp->link_config.active_speed;
5724 	orig_active_duplex = tp->link_config.active_duplex;
5725 
5726 	if (!tg3_flag(tp, HW_AUTONEG) &&
5727 	    tp->link_up &&
5728 	    tg3_flag(tp, INIT_COMPLETE)) {
5729 		mac_status = tr32(MAC_STATUS);
5730 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5731 			       MAC_STATUS_SIGNAL_DET |
5732 			       MAC_STATUS_CFG_CHANGED |
5733 			       MAC_STATUS_RCVD_CFG);
5734 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5735 				   MAC_STATUS_SIGNAL_DET)) {
5736 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5737 					    MAC_STATUS_CFG_CHANGED));
5738 			return 0;
5739 		}
5740 	}
5741 
5742 	tw32_f(MAC_TX_AUTO_NEG, 0);
5743 
5744 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5745 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5746 	tw32_f(MAC_MODE, tp->mac_mode);
5747 	udelay(40);
5748 
5749 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5750 		tg3_init_bcm8002(tp);
5751 
5752 	/* Enable link change events even while polling the serdes. */
5753 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5754 	udelay(40);
5755 
5756 	current_link_up = false;
5757 	tp->link_config.rmt_adv = 0;
5758 	mac_status = tr32(MAC_STATUS);
5759 
5760 	if (tg3_flag(tp, HW_AUTONEG))
5761 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5762 	else
5763 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5764 
5765 	tp->napi[0].hw_status->status =
5766 		(SD_STATUS_UPDATED |
5767 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5768 
5769 	for (i = 0; i < 100; i++) {
5770 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5771 				    MAC_STATUS_CFG_CHANGED));
5772 		udelay(5);
5773 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5774 					 MAC_STATUS_CFG_CHANGED |
5775 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5776 			break;
5777 	}
5778 
5779 	mac_status = tr32(MAC_STATUS);
5780 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5781 		current_link_up = false;
5782 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5783 		    tp->serdes_counter == 0) {
5784 			tw32_f(MAC_MODE, (tp->mac_mode |
5785 					  MAC_MODE_SEND_CONFIGS));
5786 			udelay(1);
5787 			tw32_f(MAC_MODE, tp->mac_mode);
5788 		}
5789 	}
5790 
5791 	if (current_link_up) {
5792 		tp->link_config.active_speed = SPEED_1000;
5793 		tp->link_config.active_duplex = DUPLEX_FULL;
5794 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795 				    LED_CTRL_LNKLED_OVERRIDE |
5796 				    LED_CTRL_1000MBPS_ON));
5797 	} else {
5798 		tp->link_config.active_speed = SPEED_UNKNOWN;
5799 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5800 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5801 				    LED_CTRL_LNKLED_OVERRIDE |
5802 				    LED_CTRL_TRAFFIC_OVERRIDE));
5803 	}
5804 
5805 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5806 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5807 		if (orig_pause_cfg != now_pause_cfg ||
5808 		    orig_active_speed != tp->link_config.active_speed ||
5809 		    orig_active_duplex != tp->link_config.active_duplex)
5810 			tg3_link_report(tp);
5811 	}
5812 
5813 	return 0;
5814 }
5815 
5816 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5817 {
5818 	int err = 0;
5819 	u32 bmsr, bmcr;
5820 	u32 current_speed = SPEED_UNKNOWN;
5821 	u8 current_duplex = DUPLEX_UNKNOWN;
5822 	bool current_link_up = false;
5823 	u32 local_adv = 0, remote_adv = 0, sgsr;
5824 
5825 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5826 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5827 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5828 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5829 
5830 		if (force_reset)
5831 			tg3_phy_reset(tp);
5832 
5833 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5834 
5835 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5836 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5837 		} else {
5838 			current_link_up = true;
5839 			if (sgsr & SERDES_TG3_SPEED_1000) {
5840 				current_speed = SPEED_1000;
5841 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5843 				current_speed = SPEED_100;
5844 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5845 			} else {
5846 				current_speed = SPEED_10;
5847 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5848 			}
5849 
5850 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5851 				current_duplex = DUPLEX_FULL;
5852 			else
5853 				current_duplex = DUPLEX_HALF;
5854 		}
5855 
5856 		tw32_f(MAC_MODE, tp->mac_mode);
5857 		udelay(40);
5858 
5859 		tg3_clear_mac_status(tp);
5860 
5861 		goto fiber_setup_done;
5862 	}
5863 
5864 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5865 	tw32_f(MAC_MODE, tp->mac_mode);
5866 	udelay(40);
5867 
5868 	tg3_clear_mac_status(tp);
5869 
5870 	if (force_reset)
5871 		tg3_phy_reset(tp);
5872 
5873 	tp->link_config.rmt_adv = 0;
5874 
5875 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5876 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5877 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5878 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5879 			bmsr |= BMSR_LSTATUS;
5880 		else
5881 			bmsr &= ~BMSR_LSTATUS;
5882 	}
5883 
5884 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5885 
5886 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5887 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5888 		/* do nothing, just check for link up at the end */
5889 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5890 		u32 adv, newadv;
5891 
5892 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5893 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5894 				 ADVERTISE_1000XPAUSE |
5895 				 ADVERTISE_1000XPSE_ASYM |
5896 				 ADVERTISE_SLCT);
5897 
5898 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5899 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5900 
5901 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5902 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5903 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5904 			tg3_writephy(tp, MII_BMCR, bmcr);
5905 
5906 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5907 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5908 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5909 
5910 			return err;
5911 		}
5912 	} else {
5913 		u32 new_bmcr;
5914 
5915 		bmcr &= ~BMCR_SPEED1000;
5916 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5917 
5918 		if (tp->link_config.duplex == DUPLEX_FULL)
5919 			new_bmcr |= BMCR_FULLDPLX;
5920 
5921 		if (new_bmcr != bmcr) {
5922 			/* BMCR_SPEED1000 is a reserved bit that needs
5923 			 * to be set on write.
5924 			 */
5925 			new_bmcr |= BMCR_SPEED1000;
5926 
5927 			/* Force a linkdown */
5928 			if (tp->link_up) {
5929 				u32 adv;
5930 
5931 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5932 				adv &= ~(ADVERTISE_1000XFULL |
5933 					 ADVERTISE_1000XHALF |
5934 					 ADVERTISE_SLCT);
5935 				tg3_writephy(tp, MII_ADVERTISE, adv);
5936 				tg3_writephy(tp, MII_BMCR, bmcr |
5937 							   BMCR_ANRESTART |
5938 							   BMCR_ANENABLE);
5939 				udelay(10);
5940 				tg3_carrier_off(tp);
5941 			}
5942 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5943 			bmcr = new_bmcr;
5944 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5945 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5946 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5947 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5948 					bmsr |= BMSR_LSTATUS;
5949 				else
5950 					bmsr &= ~BMSR_LSTATUS;
5951 			}
5952 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5953 		}
5954 	}
5955 
5956 	if (bmsr & BMSR_LSTATUS) {
5957 		current_speed = SPEED_1000;
5958 		current_link_up = true;
5959 		if (bmcr & BMCR_FULLDPLX)
5960 			current_duplex = DUPLEX_FULL;
5961 		else
5962 			current_duplex = DUPLEX_HALF;
5963 
5964 		local_adv = 0;
5965 		remote_adv = 0;
5966 
5967 		if (bmcr & BMCR_ANENABLE) {
5968 			u32 common;
5969 
5970 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5971 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5972 			common = local_adv & remote_adv;
5973 			if (common & (ADVERTISE_1000XHALF |
5974 				      ADVERTISE_1000XFULL)) {
5975 				if (common & ADVERTISE_1000XFULL)
5976 					current_duplex = DUPLEX_FULL;
5977 				else
5978 					current_duplex = DUPLEX_HALF;
5979 
5980 				tp->link_config.rmt_adv =
5981 					   mii_adv_to_ethtool_adv_x(remote_adv);
5982 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5983 				/* Link is up via parallel detect */
5984 			} else {
5985 				current_link_up = false;
5986 			}
5987 		}
5988 	}
5989 
5990 fiber_setup_done:
5991 	if (current_link_up && current_duplex == DUPLEX_FULL)
5992 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5993 
5994 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5995 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5996 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5997 
5998 	tw32_f(MAC_MODE, tp->mac_mode);
5999 	udelay(40);
6000 
6001 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6002 
6003 	tp->link_config.active_speed = current_speed;
6004 	tp->link_config.active_duplex = current_duplex;
6005 
6006 	tg3_test_and_report_link_chg(tp, current_link_up);
6007 	return err;
6008 }
6009 
6010 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6011 {
6012 	if (tp->serdes_counter) {
6013 		/* Give autoneg time to complete. */
6014 		tp->serdes_counter--;
6015 		return;
6016 	}
6017 
6018 	if (!tp->link_up &&
6019 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6020 		u32 bmcr;
6021 
6022 		tg3_readphy(tp, MII_BMCR, &bmcr);
6023 		if (bmcr & BMCR_ANENABLE) {
6024 			u32 phy1, phy2;
6025 
6026 			/* Select shadow register 0x1f */
6027 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6028 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6029 
6030 			/* Select expansion interrupt status register */
6031 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6032 					 MII_TG3_DSP_EXP1_INT_STAT);
6033 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6035 
6036 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6037 				/* We have signal detect and not receiving
6038 				 * config code words, link is up by parallel
6039 				 * detection.
6040 				 */
6041 
6042 				bmcr &= ~BMCR_ANENABLE;
6043 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6044 				tg3_writephy(tp, MII_BMCR, bmcr);
6045 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6046 			}
6047 		}
6048 	} else if (tp->link_up &&
6049 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6050 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6051 		u32 phy2;
6052 
6053 		/* Select expansion interrupt status register */
6054 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6055 				 MII_TG3_DSP_EXP1_INT_STAT);
6056 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6057 		if (phy2 & 0x20) {
6058 			u32 bmcr;
6059 
6060 			/* Config code words received, turn on autoneg. */
6061 			tg3_readphy(tp, MII_BMCR, &bmcr);
6062 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6063 
6064 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6065 
6066 		}
6067 	}
6068 }
6069 
6070 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6071 {
6072 	u32 val;
6073 	int err;
6074 
6075 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6076 		err = tg3_setup_fiber_phy(tp, force_reset);
6077 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6078 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6079 	else
6080 		err = tg3_setup_copper_phy(tp, force_reset);
6081 
6082 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6083 		u32 scale;
6084 
6085 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6086 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6087 			scale = 65;
6088 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6089 			scale = 6;
6090 		else
6091 			scale = 12;
6092 
6093 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6094 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6095 		tw32(GRC_MISC_CFG, val);
6096 	}
6097 
6098 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6099 	      (6 << TX_LENGTHS_IPG_SHIFT);
6100 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6101 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6102 		val |= tr32(MAC_TX_LENGTHS) &
6103 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6104 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6105 
6106 	if (tp->link_config.active_speed == SPEED_1000 &&
6107 	    tp->link_config.active_duplex == DUPLEX_HALF)
6108 		tw32(MAC_TX_LENGTHS, val |
6109 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6110 	else
6111 		tw32(MAC_TX_LENGTHS, val |
6112 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6113 
6114 	if (!tg3_flag(tp, 5705_PLUS)) {
6115 		if (tp->link_up) {
6116 			tw32(HOSTCC_STAT_COAL_TICKS,
6117 			     tp->coal.stats_block_coalesce_usecs);
6118 		} else {
6119 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6120 		}
6121 	}
6122 
6123 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6124 		val = tr32(PCIE_PWR_MGMT_THRESH);
6125 		if (!tp->link_up)
6126 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6127 			      tp->pwrmgmt_thresh;
6128 		else
6129 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6130 		tw32(PCIE_PWR_MGMT_THRESH, val);
6131 	}
6132 
6133 	return err;
6134 }
6135 
6136 /* tp->lock must be held */
6137 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6138 {
6139 	u64 stamp;
6140 
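	/* The LSB is read first; the hardware presumably latches the MSB at
	 * that point so the two halves form one consistent 64-bit value
	 * (an assumption inferred from the read order used here).
	 */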
6141 	ptp_read_system_prets(sts);
6142 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6143 	ptp_read_system_postts(sts);
6144 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6145 
6146 	return stamp;
6147 }
6148 
6149 /* tp->lock must be held */
6150 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6151 {
6152 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6153 
6154 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6155 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6156 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6157 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6158 }
6159 
6160 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6161 static inline void tg3_full_unlock(struct tg3 *tp);
6162 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6163 {
6164 	struct tg3 *tp = netdev_priv(dev);
6165 
6166 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6167 				SOF_TIMESTAMPING_RX_SOFTWARE |
6168 				SOF_TIMESTAMPING_SOFTWARE;
6169 
6170 	if (tg3_flag(tp, PTP_CAPABLE)) {
6171 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6172 					SOF_TIMESTAMPING_RX_HARDWARE |
6173 					SOF_TIMESTAMPING_RAW_HARDWARE;
6174 	}
6175 
6176 	if (tp->ptp_clock)
6177 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6178 	else
6179 		info->phc_index = -1;
6180 
6181 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6182 
6183 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6184 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6185 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6186 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6187 	return 0;
6188 }
6189 
6190 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6191 {
6192 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6193 	bool neg_adj = false;
6194 	u32 correction = 0;
6195 
6196 	if (ppb < 0) {
6197 		neg_adj = true;
6198 		ppb = -ppb;
6199 	}
6200 
6201 	/* Frequency adjustment is performed using hardware with a 24 bit
6202 	 * accumulator and a programmable correction value. On each clk, the
6203 	 * correction value gets added to the accumulator and when it
6204 	 * overflows, the time counter is incremented/decremented.
6205 	 *
6206 	 * So conversion from ppb to correction value is
6207 	 *		ppb * (1 << 24) / 1000000000
6208 	 */
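	/* Worked example (illustrative only): ppb = 1000 (1 ppm) yields
	 * correction = 1000 * (1 << 24) / 1000000000 ~= 16, so the 24-bit
	 * accumulator wraps about once per 2^24 / 16 ~= 1M clocks, i.e.
	 * one extra tick per million, which is the requested ~1 ppm.
	 */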
6209 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6210 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6211 
6212 	tg3_full_lock(tp, 0);
6213 
6214 	if (correction)
6215 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6216 		     TG3_EAV_REF_CLK_CORRECT_EN |
6217 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6218 	else
6219 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6220 
6221 	tg3_full_unlock(tp);
6222 
6223 	return 0;
6224 }
6225 
6226 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6227 {
6228 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6229 
6230 	tg3_full_lock(tp, 0);
6231 	tp->ptp_adjust += delta;
6232 	tg3_full_unlock(tp);
6233 
6234 	return 0;
6235 }
6236 
6237 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6238 			    struct ptp_system_timestamp *sts)
6239 {
6240 	u64 ns;
6241 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6242 
6243 	tg3_full_lock(tp, 0);
6244 	ns = tg3_refclk_read(tp, sts);
6245 	ns += tp->ptp_adjust;
6246 	tg3_full_unlock(tp);
6247 
6248 	*ts = ns_to_timespec64(ns);
6249 
6250 	return 0;
6251 }
6252 
6253 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6254 			   const struct timespec64 *ts)
6255 {
6256 	u64 ns;
6257 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258 
6259 	ns = timespec64_to_ns(ts);
6260 
6261 	tg3_full_lock(tp, 0);
6262 	tg3_refclk_write(tp, ns);
6263 	tp->ptp_adjust = 0;
6264 	tg3_full_unlock(tp);
6265 
6266 	return 0;
6267 }
6268 
6269 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6270 			  struct ptp_clock_request *rq, int on)
6271 {
6272 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6273 	u32 clock_ctl;
6274 	int rval = 0;
6275 
6276 	switch (rq->type) {
6277 	case PTP_CLK_REQ_PEROUT:
6278 		/* Reject requests with unsupported flags */
6279 		if (rq->perout.flags)
6280 			return -EOPNOTSUPP;
6281 
6282 		if (rq->perout.index != 0)
6283 			return -EINVAL;
6284 
6285 		tg3_full_lock(tp, 0);
6286 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6287 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6288 
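		/* When enabling, watchdog 0 is programmed with an absolute
		 * start time and fires the timesync output once; the checks
		 * below enforce the one-shot limitation (period == 0) and
		 * the 63-bit start value limit.
		 */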
6289 		if (on) {
6290 			u64 nsec;
6291 
6292 			nsec = rq->perout.start.sec * 1000000000ULL +
6293 			       rq->perout.start.nsec;
6294 
6295 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6296 				netdev_warn(tp->dev,
6297 					    "Device supports only a one-shot timesync output, period must be 0\n");
6298 				rval = -EINVAL;
6299 				goto err_out;
6300 			}
6301 
6302 			if (nsec & (1ULL << 63)) {
6303 				netdev_warn(tp->dev,
6304 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6305 				rval = -EINVAL;
6306 				goto err_out;
6307 			}
6308 
6309 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6310 			tw32(TG3_EAV_WATCHDOG0_MSB,
6311 			     TG3_EAV_WATCHDOG0_EN |
6312 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6313 
6314 			tw32(TG3_EAV_REF_CLCK_CTL,
6315 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6316 		} else {
6317 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6318 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6319 		}
6320 
6321 err_out:
6322 		tg3_full_unlock(tp);
6323 		return rval;
6324 
6325 	default:
6326 		break;
6327 	}
6328 
6329 	return -EOPNOTSUPP;
6330 }
6331 
6332 static const struct ptp_clock_info tg3_ptp_caps = {
6333 	.owner		= THIS_MODULE,
6334 	.name		= "tg3 clock",
6335 	.max_adj	= 250000000,
6336 	.n_alarm	= 0,
6337 	.n_ext_ts	= 0,
6338 	.n_per_out	= 1,
6339 	.n_pins		= 0,
6340 	.pps		= 0,
6341 	.adjfreq	= tg3_ptp_adjfreq,
6342 	.adjtime	= tg3_ptp_adjtime,
6343 	.gettimex64	= tg3_ptp_gettimex,
6344 	.settime64	= tg3_ptp_settime,
6345 	.enable		= tg3_ptp_enable,
6346 };
6347 
6348 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6349 				     struct skb_shared_hwtstamps *timestamp)
6350 {
6351 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6352 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6353 					   tp->ptp_adjust);
6354 }
6355 
6356 /* tp->lock must be held */
6357 static void tg3_ptp_init(struct tg3 *tp)
6358 {
6359 	if (!tg3_flag(tp, PTP_CAPABLE))
6360 		return;
6361 
6362 	/* Initialize the hardware clock to the system time. */
6363 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6364 	tp->ptp_adjust = 0;
6365 	tp->ptp_info = tg3_ptp_caps;
6366 }
6367 
6368 /* tp->lock must be held */
6369 static void tg3_ptp_resume(struct tg3 *tp)
6370 {
6371 	if (!tg3_flag(tp, PTP_CAPABLE))
6372 		return;
6373 
6374 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6375 	tp->ptp_adjust = 0;
6376 }
6377 
6378 static void tg3_ptp_fini(struct tg3 *tp)
6379 {
6380 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6381 		return;
6382 
6383 	ptp_clock_unregister(tp->ptp_clock);
6384 	tp->ptp_clock = NULL;
6385 	tp->ptp_adjust = 0;
6386 }
6387 
6388 static inline int tg3_irq_sync(struct tg3 *tp)
6389 {
6390 	return tp->irq_sync;
6391 }
6392 
6393 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6394 {
6395 	int i;
6396 
6397 	dst = (u32 *)((u8 *)dst + off);
6398 	for (i = 0; i < len; i += sizeof(u32))
6399 		*dst++ = tr32(off + i);
6400 }
6401 
6402 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6403 {
6404 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6405 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6406 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6407 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6408 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6409 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6410 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6411 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6412 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6413 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6414 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6415 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6416 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6417 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6419 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6420 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6421 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6422 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6423 
6424 	if (tg3_flag(tp, SUPPORT_MSIX))
6425 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6426 
6427 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6428 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6429 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6430 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6431 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6432 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6433 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6434 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6435 
6436 	if (!tg3_flag(tp, 5705_PLUS)) {
6437 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6438 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6439 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6440 	}
6441 
6442 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6443 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6444 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6445 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6446 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6447 
6448 	if (tg3_flag(tp, NVRAM))
6449 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6450 }
6451 
6452 static void tg3_dump_state(struct tg3 *tp)
6453 {
6454 	int i;
6455 	u32 *regs;
6456 
6457 	/* If it is a PCI error, all registers will read back as all ones
6458 	 * (0xffffffff); don't dump them out, just report the error and return.
6459 	 */
6460 	if (tp->pdev->error_state != pci_channel_io_normal) {
6461 		netdev_err(tp->dev, "PCI channel ERROR!\n");
6462 		return;
6463 	}
6464 
6465 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6466 	if (!regs)
6467 		return;
6468 
6469 	if (tg3_flag(tp, PCI_EXPRESS)) {
6470 		/* Read up to but not including private PCI registers */
6471 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6472 			regs[i / sizeof(u32)] = tr32(i);
6473 	} else
6474 		tg3_dump_legacy_regs(tp, regs);
6475 
6476 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6477 		if (!regs[i + 0] && !regs[i + 1] &&
6478 		    !regs[i + 2] && !regs[i + 3])
6479 			continue;
6480 
6481 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6482 			   i * 4,
6483 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6484 	}
6485 
6486 	kfree(regs);
6487 
6488 	for (i = 0; i < tp->irq_cnt; i++) {
6489 		struct tg3_napi *tnapi = &tp->napi[i];
6490 
6491 		/* SW status block */
6492 		netdev_err(tp->dev,
6493 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6494 			   i,
6495 			   tnapi->hw_status->status,
6496 			   tnapi->hw_status->status_tag,
6497 			   tnapi->hw_status->rx_jumbo_consumer,
6498 			   tnapi->hw_status->rx_consumer,
6499 			   tnapi->hw_status->rx_mini_consumer,
6500 			   tnapi->hw_status->idx[0].rx_producer,
6501 			   tnapi->hw_status->idx[0].tx_consumer);
6502 
6503 		netdev_err(tp->dev,
6504 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6505 			   i,
6506 			   tnapi->last_tag, tnapi->last_irq_tag,
6507 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6508 			   tnapi->rx_rcb_ptr,
6509 			   tnapi->prodring.rx_std_prod_idx,
6510 			   tnapi->prodring.rx_std_cons_idx,
6511 			   tnapi->prodring.rx_jmb_prod_idx,
6512 			   tnapi->prodring.rx_jmb_cons_idx);
6513 	}
6514 }
6515 
6516 /* This is called whenever we suspect that the system chipset is re-
6517  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6518  * is bogus tx completions. We try to recover by setting the
6519  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6520  * in the workqueue.
6521  */
6522 static void tg3_tx_recover(struct tg3 *tp)
6523 {
6524 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6525 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6526 
6527 	netdev_warn(tp->dev,
6528 		    "The system may be re-ordering memory-mapped I/O "
6529 		    "cycles to the network device, attempting to recover. "
6530 		    "Please report the problem to the driver maintainer "
6531 		    "and include system chipset information.\n");
6532 
6533 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6534 }
6535 
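/* Illustrative example, assuming a 512-entry TX ring: with tx_pending at
 * 511, tx_prod at 100 and tx_cons at 90, ten descriptors are in flight,
 * so tg3_tx_avail() reports 501 free slots.
 */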
6536 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6537 {
6538 	/* Tell compiler to fetch tx indices from memory. */
6539 	barrier();
6540 	return tnapi->tx_pending -
6541 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6542 }
6543 
6544 /* Tigon3 never reports partial packet sends.  So we do not
6545  * need special logic to handle SKBs that have not had all
6546  * of their frags sent yet, like SunGEM does.
6547  */
6548 static void tg3_tx(struct tg3_napi *tnapi)
6549 {
6550 	struct tg3 *tp = tnapi->tp;
6551 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6552 	u32 sw_idx = tnapi->tx_cons;
6553 	struct netdev_queue *txq;
6554 	int index = tnapi - tp->napi;
6555 	unsigned int pkts_compl = 0, bytes_compl = 0;
6556 
6557 	if (tg3_flag(tp, ENABLE_TSS))
6558 		index--;
6559 
6560 	txq = netdev_get_tx_queue(tp->dev, index);
6561 
6562 	while (sw_idx != hw_idx) {
6563 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6564 		struct sk_buff *skb = ri->skb;
6565 		int i, tx_bug = 0;
6566 
6567 		if (unlikely(skb == NULL)) {
6568 			tg3_tx_recover(tp);
6569 			return;
6570 		}
6571 
6572 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6573 			struct skb_shared_hwtstamps timestamp;
6574 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6575 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6576 
6577 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6578 
6579 			skb_tstamp_tx(skb, &timestamp);
6580 		}
6581 
6582 		pci_unmap_single(tp->pdev,
6583 				 dma_unmap_addr(ri, mapping),
6584 				 skb_headlen(skb),
6585 				 PCI_DMA_TODEVICE);
6586 
6587 		ri->skb = NULL;
6588 
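		/* A buffer that needed more than one descriptor (e.g. to
		 * work around DMA boundary bugs) has its extra entries
		 * marked fragmented; skip them before the next buffer.
		 */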
6589 		while (ri->fragmented) {
6590 			ri->fragmented = false;
6591 			sw_idx = NEXT_TX(sw_idx);
6592 			ri = &tnapi->tx_buffers[sw_idx];
6593 		}
6594 
6595 		sw_idx = NEXT_TX(sw_idx);
6596 
6597 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6598 			ri = &tnapi->tx_buffers[sw_idx];
6599 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6600 				tx_bug = 1;
6601 
6602 			pci_unmap_page(tp->pdev,
6603 				       dma_unmap_addr(ri, mapping),
6604 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6605 				       PCI_DMA_TODEVICE);
6606 
6607 			while (ri->fragmented) {
6608 				ri->fragmented = false;
6609 				sw_idx = NEXT_TX(sw_idx);
6610 				ri = &tnapi->tx_buffers[sw_idx];
6611 			}
6612 
6613 			sw_idx = NEXT_TX(sw_idx);
6614 		}
6615 
6616 		pkts_compl++;
6617 		bytes_compl += skb->len;
6618 
6619 		dev_consume_skb_any(skb);
6620 
6621 		if (unlikely(tx_bug)) {
6622 			tg3_tx_recover(tp);
6623 			return;
6624 		}
6625 	}
6626 
6627 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6628 
6629 	tnapi->tx_cons = sw_idx;
6630 
6631 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6632 	 * before checking for netif_queue_stopped().  Without the
6633 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6634 	 * will miss it and cause the queue to be stopped forever.
6635 	 */
6636 	smp_mb();
6637 
6638 	if (unlikely(netif_tx_queue_stopped(txq) &&
6639 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6640 		__netif_tx_lock(txq, smp_processor_id());
6641 		if (netif_tx_queue_stopped(txq) &&
6642 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6643 			netif_tx_wake_queue(txq);
6644 		__netif_tx_unlock(txq);
6645 	}
6646 }
6647 
6648 static void tg3_frag_free(bool is_frag, void *data)
6649 {
6650 	if (is_frag)
6651 		skb_free_frag(data);
6652 	else
6653 		kfree(data);
6654 }
6655 
6656 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6657 {
6658 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6659 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6660 
6661 	if (!ri->data)
6662 		return;
6663 
6664 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6665 			 map_sz, PCI_DMA_FROMDEVICE);
6666 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6667 	ri->data = NULL;
6668 }
6669 
6670 
6671 /* Returns size of skb allocated or < 0 on error.
6672  *
6673  * We only need to fill in the address because the other members
6674  * of the RX descriptor are invariant, see tg3_init_rings.
6675  *
6676  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6677  * posting buffers we only dirty the first cache line of the RX
6678  * descriptor (containing the address).  Whereas for the RX status
6679  * buffers the cpu only reads the last cacheline of the RX descriptor
6680  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6681  */
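/* Sizing note: the allocation leaves room for skb_shared_info so the
 * buffer can later be handed to build_skb(); sizes up to a page come
 * from the NAPI page-fragment cache, larger ones fall back to kmalloc.
 */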
6682 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6683 			     u32 opaque_key, u32 dest_idx_unmasked,
6684 			     unsigned int *frag_size)
6685 {
6686 	struct tg3_rx_buffer_desc *desc;
6687 	struct ring_info *map;
6688 	u8 *data;
6689 	dma_addr_t mapping;
6690 	int skb_size, data_size, dest_idx;
6691 
6692 	switch (opaque_key) {
6693 	case RXD_OPAQUE_RING_STD:
6694 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6695 		desc = &tpr->rx_std[dest_idx];
6696 		map = &tpr->rx_std_buffers[dest_idx];
6697 		data_size = tp->rx_pkt_map_sz;
6698 		break;
6699 
6700 	case RXD_OPAQUE_RING_JUMBO:
6701 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6702 		desc = &tpr->rx_jmb[dest_idx].std;
6703 		map = &tpr->rx_jmb_buffers[dest_idx];
6704 		data_size = TG3_RX_JMB_MAP_SZ;
6705 		break;
6706 
6707 	default:
6708 		return -EINVAL;
6709 	}
6710 
6711 	/* Do not overwrite any of the map or rp information
6712 	 * until we are sure we can commit to a new buffer.
6713 	 *
6714 	 * Callers depend upon this behavior and assume that
6715 	 * we leave everything unchanged if we fail.
6716 	 */
6717 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6718 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6719 	if (skb_size <= PAGE_SIZE) {
6720 		data = napi_alloc_frag(skb_size);
6721 		*frag_size = skb_size;
6722 	} else {
6723 		data = kmalloc(skb_size, GFP_ATOMIC);
6724 		*frag_size = 0;
6725 	}
6726 	if (!data)
6727 		return -ENOMEM;
6728 
6729 	mapping = pci_map_single(tp->pdev,
6730 				 data + TG3_RX_OFFSET(tp),
6731 				 data_size,
6732 				 PCI_DMA_FROMDEVICE);
6733 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6734 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6735 		return -EIO;
6736 	}
6737 
6738 	map->data = data;
6739 	dma_unmap_addr_set(map, mapping, mapping);
6740 
6741 	desc->addr_hi = ((u64)mapping >> 32);
6742 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6743 
6744 	return data_size;
6745 }
6746 
6747 /* We only need to move over in the address because the other
6748  * members of the RX descriptor are invariant.  See notes above
6749  * tg3_alloc_rx_data for full details.
6750  */
6751 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6752 			   struct tg3_rx_prodring_set *dpr,
6753 			   u32 opaque_key, int src_idx,
6754 			   u32 dest_idx_unmasked)
6755 {
6756 	struct tg3 *tp = tnapi->tp;
6757 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6758 	struct ring_info *src_map, *dest_map;
6759 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6760 	int dest_idx;
6761 
6762 	switch (opaque_key) {
6763 	case RXD_OPAQUE_RING_STD:
6764 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6765 		dest_desc = &dpr->rx_std[dest_idx];
6766 		dest_map = &dpr->rx_std_buffers[dest_idx];
6767 		src_desc = &spr->rx_std[src_idx];
6768 		src_map = &spr->rx_std_buffers[src_idx];
6769 		break;
6770 
6771 	case RXD_OPAQUE_RING_JUMBO:
6772 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6773 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6774 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6775 		src_desc = &spr->rx_jmb[src_idx].std;
6776 		src_map = &spr->rx_jmb_buffers[src_idx];
6777 		break;
6778 
6779 	default:
6780 		return;
6781 	}
6782 
6783 	dest_map->data = src_map->data;
6784 	dma_unmap_addr_set(dest_map, mapping,
6785 			   dma_unmap_addr(src_map, mapping));
6786 	dest_desc->addr_hi = src_desc->addr_hi;
6787 	dest_desc->addr_lo = src_desc->addr_lo;
6788 
6789 	/* Ensure that the update to the skb happens after the physical
6790 	 * addresses have been transferred to the new BD location.
6791 	 */
6792 	smp_wmb();
6793 
6794 	src_map->data = NULL;
6795 }
6796 
6797 /* The RX ring scheme is composed of multiple rings which post fresh
6798  * buffers to the chip, and one special ring the chip uses to report
6799  * status back to the host.
6800  *
6801  * The special ring reports the status of received packets to the
6802  * host.  The chip does not write into the original descriptor the
6803  * RX buffer was obtained from.  The chip simply takes the original
6804  * descriptor as provided by the host, updates the status and length
6805  * field, then writes this into the next status ring entry.
6806  *
6807  * Each ring the host uses to post buffers to the chip is described
6808  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6809  * it is first placed into the on-chip ram.  When the packet's length
6810  * is known, it walks down the TG3_BDINFO entries to select the ring.
6811  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6812  * which is within the range of the new packet's length is chosen.
6813  *
6814  * The "separate ring for rx status" scheme may sound queer, but it makes
6815  * sense from a cache coherency perspective.  If only the host writes
6816  * to the buffer post rings, and only the chip writes to the rx status
6817  * rings, then cache lines never move beyond shared-modified state.
6818  * If both the host and chip were to write into the same ring, cache line
6819  * eviction could occur since both entities want it in an exclusive state.
6820  */
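/* Illustrative example: a 300-byte frame falls within the standard
 * ring's MAXLEN and is reported against a standard-ring buffer, while a
 * 5000-byte frame on a jumbo-MTU setup is matched to the jumbo ring.
 */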
6821 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6822 {
6823 	struct tg3 *tp = tnapi->tp;
6824 	u32 work_mask, rx_std_posted = 0;
6825 	u32 std_prod_idx, jmb_prod_idx;
6826 	u32 sw_idx = tnapi->rx_rcb_ptr;
6827 	u16 hw_idx;
6828 	int received;
6829 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6830 
6831 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6832 	/*
6833 	 * We need to order the read of hw_idx and the read of
6834 	 * the opaque cookie.
6835 	 */
6836 	rmb();
6837 	work_mask = 0;
6838 	received = 0;
6839 	std_prod_idx = tpr->rx_std_prod_idx;
6840 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6841 	while (sw_idx != hw_idx && budget > 0) {
6842 		struct ring_info *ri;
6843 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6844 		unsigned int len;
6845 		struct sk_buff *skb;
6846 		dma_addr_t dma_addr;
6847 		u32 opaque_key, desc_idx, *post_ptr;
6848 		u8 *data;
6849 		u64 tstamp = 0;
6850 
6851 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6852 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6853 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6854 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6855 			dma_addr = dma_unmap_addr(ri, mapping);
6856 			data = ri->data;
6857 			post_ptr = &std_prod_idx;
6858 			rx_std_posted++;
6859 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6860 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6861 			dma_addr = dma_unmap_addr(ri, mapping);
6862 			data = ri->data;
6863 			post_ptr = &jmb_prod_idx;
6864 		} else
6865 			goto next_pkt_nopost;
6866 
6867 		work_mask |= opaque_key;
6868 
6869 		if (desc->err_vlan & RXD_ERR_MASK) {
6870 		drop_it:
6871 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6872 				       desc_idx, *post_ptr);
6873 		drop_it_no_recycle:
6874 			/* The card itself keeps track of the other statistics. */
6875 			tnapi->rx_dropped++;
6876 			goto next_pkt;
6877 		}
6878 
6879 		prefetch(data + TG3_RX_OFFSET(tp));
6880 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6881 		      ETH_FCS_LEN;
6882 
6883 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6884 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6885 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6886 		     RXD_FLAG_PTPSTAT_PTPV2) {
6887 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6888 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6889 		}
6890 
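		/* Large packets: hand the existing buffer to the stack and
		 * post a fresh one; small packets: copy into a new skb and
		 * recycle the DMA buffer back onto the producer ring.
		 */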
6891 		if (len > TG3_RX_COPY_THRESH(tp)) {
6892 			int skb_size;
6893 			unsigned int frag_size;
6894 
6895 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6896 						    *post_ptr, &frag_size);
6897 			if (skb_size < 0)
6898 				goto drop_it;
6899 
6900 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6901 					 PCI_DMA_FROMDEVICE);
6902 
6903 			/* Ensure that the update to the data happens
6904 			 * after the usage of the old DMA mapping.
6905 			 */
6906 			smp_wmb();
6907 
6908 			ri->data = NULL;
6909 
6910 			skb = build_skb(data, frag_size);
6911 			if (!skb) {
6912 				tg3_frag_free(frag_size != 0, data);
6913 				goto drop_it_no_recycle;
6914 			}
6915 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6916 		} else {
6917 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6918 				       desc_idx, *post_ptr);
6919 
6920 			skb = netdev_alloc_skb(tp->dev,
6921 					       len + TG3_RAW_IP_ALIGN);
6922 			if (skb == NULL)
6923 				goto drop_it_no_recycle;
6924 
6925 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6926 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6927 			memcpy(skb->data,
6928 			       data + TG3_RX_OFFSET(tp),
6929 			       len);
6930 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6931 		}
6932 
6933 		skb_put(skb, len);
6934 		if (tstamp)
6935 			tg3_hwclock_to_timestamp(tp, tstamp,
6936 						 skb_hwtstamps(skb));
6937 
6938 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6939 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6940 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6941 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6942 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6943 		else
6944 			skb_checksum_none_assert(skb);
6945 
6946 		skb->protocol = eth_type_trans(skb, tp->dev);
6947 
6948 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6949 		    skb->protocol != htons(ETH_P_8021Q) &&
6950 		    skb->protocol != htons(ETH_P_8021AD)) {
6951 			dev_kfree_skb_any(skb);
6952 			goto drop_it_no_recycle;
6953 		}
6954 
6955 		if (desc->type_flags & RXD_FLAG_VLAN &&
6956 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6957 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6958 					       desc->err_vlan & RXD_VLAN_MASK);
6959 
6960 		napi_gro_receive(&tnapi->napi, skb);
6961 
6962 		received++;
6963 		budget--;
6964 
6965 next_pkt:
6966 		(*post_ptr)++;
6967 
6968 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6969 			tpr->rx_std_prod_idx = std_prod_idx &
6970 					       tp->rx_std_ring_mask;
6971 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6972 				     tpr->rx_std_prod_idx);
6973 			work_mask &= ~RXD_OPAQUE_RING_STD;
6974 			rx_std_posted = 0;
6975 		}
6976 next_pkt_nopost:
6977 		sw_idx++;
6978 		sw_idx &= tp->rx_ret_ring_mask;
6979 
6980 		/* Refresh hw_idx to see if there is new work */
6981 		if (sw_idx == hw_idx) {
6982 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6983 			rmb();
6984 		}
6985 	}
6986 
6987 	/* ACK the status ring. */
6988 	tnapi->rx_rcb_ptr = sw_idx;
6989 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6990 
6991 	/* Refill RX ring(s). */
6992 	if (!tg3_flag(tp, ENABLE_RSS)) {
6993 		/* Sync BD data before updating mailbox */
6994 		wmb();
6995 
6996 		if (work_mask & RXD_OPAQUE_RING_STD) {
6997 			tpr->rx_std_prod_idx = std_prod_idx &
6998 					       tp->rx_std_ring_mask;
6999 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7000 				     tpr->rx_std_prod_idx);
7001 		}
7002 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7003 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
7004 					       tp->rx_jmb_ring_mask;
7005 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7006 				     tpr->rx_jmb_prod_idx);
7007 		}
7008 	} else if (work_mask) {
7009 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7010 		 * updated before the producer indices can be updated.
7011 		 */
7012 		smp_wmb();
7013 
7014 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7015 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7016 
7017 		if (tnapi != &tp->napi[1]) {
7018 			tp->rx_refill = true;
7019 			napi_schedule(&tp->napi[1].napi);
7020 		}
7021 	}
7022 
7023 	return received;
7024 }
7025 
7026 static void tg3_poll_link(struct tg3 *tp)
7027 {
7028 	/* handle link change and other phy events */
7029 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7030 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7031 
7032 		if (sblk->status & SD_STATUS_LINK_CHG) {
7033 			sblk->status = SD_STATUS_UPDATED |
7034 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7035 			spin_lock(&tp->lock);
7036 			if (tg3_flag(tp, USE_PHYLIB)) {
7037 				tw32_f(MAC_STATUS,
7038 				     (MAC_STATUS_SYNC_CHANGED |
7039 				      MAC_STATUS_CFG_CHANGED |
7040 				      MAC_STATUS_MI_COMPLETION |
7041 				      MAC_STATUS_LNKSTATE_CHANGED));
7042 				udelay(40);
7043 			} else
7044 				tg3_setup_phy(tp, false);
7045 			spin_unlock(&tp->lock);
7046 		}
7047 	}
7048 }
7049 
7050 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7051 				struct tg3_rx_prodring_set *dpr,
7052 				struct tg3_rx_prodring_set *spr)
7053 {
7054 	u32 si, di, cpycnt, src_prod_idx;
7055 	int i, err = 0;
7056 
7057 	while (1) {
7058 		src_prod_idx = spr->rx_std_prod_idx;
7059 
7060 		/* Make sure updates to the rx_std_buffers[] entries and the
7061 		 * standard producer index are seen in the correct order.
7062 		 */
7063 		smp_rmb();
7064 
7065 		if (spr->rx_std_cons_idx == src_prod_idx)
7066 			break;
7067 
7068 		if (spr->rx_std_cons_idx < src_prod_idx)
7069 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7070 		else
7071 			cpycnt = tp->rx_std_ring_mask + 1 -
7072 				 spr->rx_std_cons_idx;
7073 
7074 		cpycnt = min(cpycnt,
7075 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
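		/* Illustrative example, assuming a 512-entry ring (mask
		 * 511): with a source cons_idx of 500 and prod_idx of 10,
		 * the first pass copies the 12 entries up to the ring end
		 * and the next pass the remaining 10, each pass also capped
		 * by the free room in the destination ring.
		 */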
7076 
7077 		si = spr->rx_std_cons_idx;
7078 		di = dpr->rx_std_prod_idx;
7079 
7080 		for (i = di; i < di + cpycnt; i++) {
7081 			if (dpr->rx_std_buffers[i].data) {
7082 				cpycnt = i - di;
7083 				err = -ENOSPC;
7084 				break;
7085 			}
7086 		}
7087 
7088 		if (!cpycnt)
7089 			break;
7090 
7091 		/* Ensure that updates to the rx_std_buffers ring and the
7092 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7093 		 * ordered correctly WRT the skb check above.
7094 		 */
7095 		smp_rmb();
7096 
7097 		memcpy(&dpr->rx_std_buffers[di],
7098 		       &spr->rx_std_buffers[si],
7099 		       cpycnt * sizeof(struct ring_info));
7100 
7101 		for (i = 0; i < cpycnt; i++, di++, si++) {
7102 			struct tg3_rx_buffer_desc *sbd, *dbd;
7103 			sbd = &spr->rx_std[si];
7104 			dbd = &dpr->rx_std[di];
7105 			dbd->addr_hi = sbd->addr_hi;
7106 			dbd->addr_lo = sbd->addr_lo;
7107 		}
7108 
7109 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7110 				       tp->rx_std_ring_mask;
7111 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7112 				       tp->rx_std_ring_mask;
7113 	}
7114 
7115 	while (1) {
7116 		src_prod_idx = spr->rx_jmb_prod_idx;
7117 
7118 		/* Make sure updates to the rx_jmb_buffers[] entries and
7119 		 * the jumbo producer index are seen in the correct order.
7120 		 */
7121 		smp_rmb();
7122 
7123 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7124 			break;
7125 
7126 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7127 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7128 		else
7129 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7130 				 spr->rx_jmb_cons_idx;
7131 
7132 		cpycnt = min(cpycnt,
7133 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7134 
7135 		si = spr->rx_jmb_cons_idx;
7136 		di = dpr->rx_jmb_prod_idx;
7137 
7138 		for (i = di; i < di + cpycnt; i++) {
7139 			if (dpr->rx_jmb_buffers[i].data) {
7140 				cpycnt = i - di;
7141 				err = -ENOSPC;
7142 				break;
7143 			}
7144 		}
7145 
7146 		if (!cpycnt)
7147 			break;
7148 
7149 		/* Ensure that updates to the rx_jmb_buffers ring and the
7150 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7151 		 * ordered correctly WRT the skb check above.
7152 		 */
7153 		smp_rmb();
7154 
7155 		memcpy(&dpr->rx_jmb_buffers[di],
7156 		       &spr->rx_jmb_buffers[si],
7157 		       cpycnt * sizeof(struct ring_info));
7158 
7159 		for (i = 0; i < cpycnt; i++, di++, si++) {
7160 			struct tg3_rx_buffer_desc *sbd, *dbd;
7161 			sbd = &spr->rx_jmb[si].std;
7162 			dbd = &dpr->rx_jmb[di].std;
7163 			dbd->addr_hi = sbd->addr_hi;
7164 			dbd->addr_lo = sbd->addr_lo;
7165 		}
7166 
7167 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7168 				       tp->rx_jmb_ring_mask;
7169 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7170 				       tp->rx_jmb_ring_mask;
7171 	}
7172 
7173 	return err;
7174 }
7175 
7176 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7177 {
7178 	struct tg3 *tp = tnapi->tp;
7179 
7180 	/* run TX completion thread */
7181 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7182 		tg3_tx(tnapi);
7183 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7184 			return work_done;
7185 	}
7186 
7187 	if (!tnapi->rx_rcb_prod_idx)
7188 		return work_done;
7189 
7190 	/* run RX thread, within the bounds set by NAPI.
7191 	 * All RX "locking" is done by ensuring outside
7192 	 * code synchronizes with tg3->napi.poll()
7193 	 */
7194 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7195 		work_done += tg3_rx(tnapi, budget - work_done);
7196 
7197 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7198 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7199 		int i, err = 0;
7200 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7201 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7202 
7203 		tp->rx_refill = false;
7204 		for (i = 1; i <= tp->rxq_cnt; i++)
7205 			err |= tg3_rx_prodring_xfer(tp, dpr,
7206 						    &tp->napi[i].prodring);
7207 
7208 		wmb();
7209 
7210 		if (std_prod_idx != dpr->rx_std_prod_idx)
7211 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7212 				     dpr->rx_std_prod_idx);
7213 
7214 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7215 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7216 				     dpr->rx_jmb_prod_idx);
7217 
7218 		if (err)
7219 			tw32_f(HOSTCC_MODE, tp->coal_now);
7220 	}
7221 
7222 	return work_done;
7223 }
7224 
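/* [editor's note] The RESET_TASK_PENDING bit makes scheduling
 * idempotent: test_and_set_bit() guarantees the reset work is queued
 * at most once no matter how many error paths request it, and
 * tg3_reset_task_cancel() clears it after (or instead of) cancelling
 * the work.
 */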
7225 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7226 {
7227 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7228 		schedule_work(&tp->reset_task);
7229 }
7230 
7231 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7232 {
7233 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7234 		cancel_work_sync(&tp->reset_task);
7235 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7236 }
7237 
7238 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7239 {
7240 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7241 	struct tg3 *tp = tnapi->tp;
7242 	int work_done = 0;
7243 	struct tg3_hw_status *sblk = tnapi->hw_status;
7244 
7245 	while (1) {
7246 		work_done = tg3_poll_work(tnapi, work_done, budget);
7247 
7248 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7249 			goto tx_recovery;
7250 
7251 		if (unlikely(work_done >= budget))
7252 			break;
7253 
7254 		/* tnapi->last_tag is used in the interrupt re-enabling
7255 		 * mailbox write below to tell the hw how much work has been
7256 		 * processed, so we must read it before checking for more work.
7257 		 */
7258 		tnapi->last_tag = sblk->status_tag;
7259 		tnapi->last_irq_tag = tnapi->last_tag;
7260 		rmb();
7261 
7262 		/* check for RX/TX work to do */
7263 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7264 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7265 
7266 			/* This test here is not race free, but will reduce
7267 			 * the number of interrupts by looping again.
7268 			 */
7269 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7270 				continue;
7271 
7272 			napi_complete_done(napi, work_done);
7273 			/* Reenable interrupts. */
7274 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7275 
7276 			/* This test here is synchronized by napi_schedule()
7277 			 * and napi_complete() to close the race condition.
7278 			 */
7279 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7280 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7281 						  HOSTCC_MODE_ENABLE |
7282 						  tnapi->coal_now);
7283 			}
7284 			break;
7285 		}
7286 	}
7287 
7288 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7289 	return work_done;
7290 
7291 tx_recovery:
7292 	/* work_done is guaranteed to be less than budget. */
7293 	napi_complete(napi);
7294 	tg3_reset_task_schedule(tp);
7295 	return work_done;
7296 }
7297 
7298 static void tg3_process_error(struct tg3 *tp)
7299 {
7300 	u32 val;
7301 	bool real_error = false;
7302 
7303 	if (tg3_flag(tp, ERROR_PROCESSED))
7304 		return;
7305 
7306 	/* Check Flow Attention register */
7307 	val = tr32(HOSTCC_FLOW_ATTN);
7308 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7309 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7310 		real_error = true;
7311 	}
7312 
7313 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7314 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7315 		real_error = true;
7316 	}
7317 
7318 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7319 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7320 		real_error = true;
7321 	}
7322 
7323 	if (!real_error)
7324 		return;
7325 
7326 	tg3_dump_state(tp);
7327 
7328 	tg3_flag_set(tp, ERROR_PROCESSED);
7329 	tg3_reset_task_schedule(tp);
7330 }
7331 
7332 static int tg3_poll(struct napi_struct *napi, int budget)
7333 {
7334 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7335 	struct tg3 *tp = tnapi->tp;
7336 	int work_done = 0;
7337 	struct tg3_hw_status *sblk = tnapi->hw_status;
7338 
7339 	while (1) {
7340 		if (sblk->status & SD_STATUS_ERROR)
7341 			tg3_process_error(tp);
7342 
7343 		tg3_poll_link(tp);
7344 
7345 		work_done = tg3_poll_work(tnapi, work_done, budget);
7346 
7347 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7348 			goto tx_recovery;
7349 
7350 		if (unlikely(work_done >= budget))
7351 			break;
7352 
7353 		if (tg3_flag(tp, TAGGED_STATUS)) {
7354 			/* tnapi->last_tag is used in tg3_int_reenable() below
7355 			 * to tell the hw how much work has been processed,
7356 			 * so we must read it before checking for more work.
7357 			 */
7358 			tnapi->last_tag = sblk->status_tag;
7359 			tnapi->last_irq_tag = tnapi->last_tag;
7360 			rmb();
7361 		} else
7362 			sblk->status &= ~SD_STATUS_UPDATED;
7363 
7364 		if (likely(!tg3_has_work(tnapi))) {
7365 			napi_complete_done(napi, work_done);
7366 			tg3_int_reenable(tnapi);
7367 			break;
7368 		}
7369 	}
7370 
7371 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7372 	return work_done;
7373 
7374 tx_recovery:
7375 	/* work_done is guaranteed to be less than budget. */
7376 	napi_complete(napi);
7377 	tg3_reset_task_schedule(tp);
7378 	return work_done;
7379 }
7380 
7381 static void tg3_napi_disable(struct tg3 *tp)
7382 {
7383 	int i;
7384 
7385 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7386 		napi_disable(&tp->napi[i].napi);
7387 }
7388 
7389 static void tg3_napi_enable(struct tg3 *tp)
7390 {
7391 	int i;
7392 
7393 	for (i = 0; i < tp->irq_cnt; i++)
7394 		napi_enable(&tp->napi[i].napi);
7395 }
7396 
7397 static void tg3_napi_init(struct tg3 *tp)
7398 {
7399 	int i;
7400 
7401 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7402 	for (i = 1; i < tp->irq_cnt; i++)
7403 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7404 }
7405 
7406 static void tg3_napi_fini(struct tg3 *tp)
7407 {
7408 	int i;
7409 
7410 	for (i = 0; i < tp->irq_cnt; i++)
7411 		netif_napi_del(&tp->napi[i].napi);
7412 }
7413 
7414 static inline void tg3_netif_stop(struct tg3 *tp)
7415 {
7416 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7417 	tg3_napi_disable(tp);
7418 	netif_carrier_off(tp->dev);
7419 	netif_tx_disable(tp->dev);
7420 }
7421 
7422 /* tp->lock must be held */
7423 static inline void tg3_netif_start(struct tg3 *tp)
7424 {
7425 	tg3_ptp_resume(tp);
7426 
7427 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7428 	 * appropriate so long as all callers are assured to
7429 	 * have free tx slots (such as after tg3_init_hw)
7430 	 */
7431 	netif_tx_wake_all_queues(tp->dev);
7432 
7433 	if (tp->link_up)
7434 		netif_carrier_on(tp->dev);
7435 
7436 	tg3_napi_enable(tp);
7437 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7438 	tg3_enable_ints(tp);
7439 }
7440 
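/* [editor's note] tg3_irq_quiesce() is entered with tp->lock held
 * (hence the __releases/__acquires annotations below).  It sets
 * irq_sync so the ISRs bail out early, drops the lock so in-flight
 * handlers can finish, waits on every vector with synchronize_irq(),
 * and then re-takes the lock.
 */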
7441 static void tg3_irq_quiesce(struct tg3 *tp)
7442 	__releases(tp->lock)
7443 	__acquires(tp->lock)
7444 {
7445 	int i;
7446 
7447 	BUG_ON(tp->irq_sync);
7448 
7449 	tp->irq_sync = 1;
7450 	smp_mb();
7451 
7452 	spin_unlock_bh(&tp->lock);
7453 
7454 	for (i = 0; i < tp->irq_cnt; i++)
7455 		synchronize_irq(tp->napi[i].irq_vec);
7456 
7457 	spin_lock_bh(&tp->lock);
7458 }
7459 
7460 /* Fully shut down all tg3 driver activity elsewhere in the system.
7461  * If irq_sync is non-zero, then the IRQ handlers must be quiesced
7462  * as well.  Most of the time, this is not necessary except when
7463  * shutting down the device.
7464  */
7465 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7466 {
7467 	spin_lock_bh(&tp->lock);
7468 	if (irq_sync)
7469 		tg3_irq_quiesce(tp);
7470 }
7471 
7472 static inline void tg3_full_unlock(struct tg3 *tp)
7473 {
7474 	spin_unlock_bh(&tp->lock);
7475 }
7476 
7477 /* One-shot MSI handler - Chip automatically disables interrupt
7478  * after sending MSI so driver doesn't have to do it.
7479  */
7480 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7481 {
7482 	struct tg3_napi *tnapi = dev_id;
7483 	struct tg3 *tp = tnapi->tp;
7484 
7485 	prefetch(tnapi->hw_status);
7486 	if (tnapi->rx_rcb)
7487 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7488 
7489 	if (likely(!tg3_irq_sync(tp)))
7490 		napi_schedule(&tnapi->napi);
7491 
7492 	return IRQ_HANDLED;
7493 }
7494 
7495 /* MSI ISR - No need to check for interrupt sharing and no need to
7496  * flush status block and interrupt mailbox. PCI ordering rules
7497  * guarantee that MSI will arrive after the status block.
7498  */
7499 static irqreturn_t tg3_msi(int irq, void *dev_id)
7500 {
7501 	struct tg3_napi *tnapi = dev_id;
7502 	struct tg3 *tp = tnapi->tp;
7503 
7504 	prefetch(tnapi->hw_status);
7505 	if (tnapi->rx_rcb)
7506 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7507 	/*
7508 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7509 	 * chip-internal interrupt pending events.
7510 	 * Writing non-zero to intr-mbox-0 additionally tells the
7511 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7512 	 * event coalescing.
7513 	 */
7514 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7515 	if (likely(!tg3_irq_sync(tp)))
7516 		napi_schedule(&tnapi->napi);
7517 
7518 	return IRQ_RETVAL(1);
7519 }
7520 
7521 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7522 {
7523 	struct tg3_napi *tnapi = dev_id;
7524 	struct tg3 *tp = tnapi->tp;
7525 	struct tg3_hw_status *sblk = tnapi->hw_status;
7526 	unsigned int handled = 1;
7527 
7528 	/* In INTx mode, it is possible for the interrupt to arrive at
7529 	 * the CPU before the status block posted ahead of it is visible.
7530 	 * Reading the PCI State register will confirm whether the
7531 	 * interrupt is ours and will flush the status block.
7532 	 */
7533 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7534 		if (tg3_flag(tp, CHIP_RESETTING) ||
7535 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7536 			handled = 0;
7537 			goto out;
7538 		}
7539 	}
7540 
7541 	/*
7542 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7543 	 * chip-internal interrupt pending events.
7544 	 * Writing non-zero to intr-mbox-0 additionally tells the
7545 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7546 	 * event coalescing.
7547 	 *
7548 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7549 	 * spurious interrupts.  The flush impacts performance but
7550 	 * excessive spurious interrupts can be worse in some cases.
7551 	 */
7552 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7553 	if (tg3_irq_sync(tp))
7554 		goto out;
7555 	sblk->status &= ~SD_STATUS_UPDATED;
7556 	if (likely(tg3_has_work(tnapi))) {
7557 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7558 		napi_schedule(&tnapi->napi);
7559 	} else {
7560 		/* No work, shared interrupt perhaps?  re-enable
7561 		 * interrupts, and flush that PCI write
7562 		 */
7563 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7564 			       0x00000000);
7565 	}
7566 out:
7567 	return IRQ_RETVAL(handled);
7568 }
7569 
7570 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7571 {
7572 	struct tg3_napi *tnapi = dev_id;
7573 	struct tg3 *tp = tnapi->tp;
7574 	struct tg3_hw_status *sblk = tnapi->hw_status;
7575 	unsigned int handled = 1;
7576 
7577 	/* In INTx mode, it is possible for the interrupt to arrive at
7578 	 * the CPU before the status block posted ahead of it is visible.
7579 	 * Reading the PCI State register will confirm whether the
7580 	 * interrupt is ours and will flush the status block.
7581 	 */
7582 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7583 		if (tg3_flag(tp, CHIP_RESETTING) ||
7584 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7585 			handled = 0;
7586 			goto out;
7587 		}
7588 	}
7589 
7590 	/*
7591 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7592 	 * chip-internal interrupt pending events.
7593 	 * Writing non-zero to intr-mbox-0 additionally tells the
7594 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7595 	 * event coalescing.
7596 	 *
7597 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7598 	 * spurious interrupts.  The flush impacts performance but
7599 	 * excessive spurious interrupts can be worse in some cases.
7600 	 */
7601 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7602 
7603 	/*
7604 	 * In a shared interrupt configuration, sometimes other devices'
7605 	 * interrupts will scream.  We record the current status tag here
7606 	 * so that the above check can report that the screaming interrupts
7607 	 * are unhandled.  Eventually they will be silenced.
7608 	 */
7609 	tnapi->last_irq_tag = sblk->status_tag;
7610 
7611 	if (tg3_irq_sync(tp))
7612 		goto out;
7613 
7614 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7615 
7616 	napi_schedule(&tnapi->napi);
7617 
7618 out:
7619 	return IRQ_RETVAL(handled);
7620 }
7621 
7622 /* ISR for interrupt test */
7623 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7624 {
7625 	struct tg3_napi *tnapi = dev_id;
7626 	struct tg3 *tp = tnapi->tp;
7627 	struct tg3_hw_status *sblk = tnapi->hw_status;
7628 
7629 	if ((sblk->status & SD_STATUS_UPDATED) ||
7630 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7631 		tg3_disable_ints(tp);
7632 		return IRQ_RETVAL(1);
7633 	}
7634 	return IRQ_RETVAL(0);
7635 }
7636 
7637 #ifdef CONFIG_NET_POLL_CONTROLLER
7638 static void tg3_poll_controller(struct net_device *dev)
7639 {
7640 	int i;
7641 	struct tg3 *tp = netdev_priv(dev);
7642 
7643 	if (tg3_irq_sync(tp))
7644 		return;
7645 
7646 	for (i = 0; i < tp->irq_cnt; i++)
7647 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7648 }
7649 #endif
7650 
7651 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7652 {
7653 	struct tg3 *tp = netdev_priv(dev);
7654 
7655 	if (netif_msg_tx_err(tp)) {
7656 		netdev_err(dev, "transmit timed out, resetting\n");
7657 		tg3_dump_state(tp);
7658 	}
7659 
7660 	tg3_reset_task_schedule(tp);
7661 }
7662 
7663 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7664 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7665 {
7666 	u32 base = (u32) mapping & 0xffffffff;
7667 
7668 	return base + len + 8 < base;
7669 }
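
/* [editor's note] Worked example: with mapping = 0xffffe000 and
 * len = 0x3000, base + len + 8 = 0x100001008, which truncates to
 * 0x1008 in 32-bit arithmetic; 0x1008 < 0xffffe000, so the wrap is
 * caught and the buffer is flagged as crossing the 4GB boundary.
 * The +8 adds a small guard band, so a buffer ending within 8 bytes
 * of a boundary is rejected as well.
 */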
7670 
7671 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7672  * of any 4GB boundaries: 4G, 8G, etc
7673  */
7674 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7675 					   u32 len, u32 mss)
7676 {
7677 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7678 		u32 base = (u32) mapping & 0xffffffff;
7679 
7680 		return ((base + len + (mss & 0x3fff)) < base);
7681 	}
7682 	return 0;
7683 }
7684 
7685 /* Test for DMA addresses > 40-bit */
7686 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7687 					  int len)
7688 {
7689 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7690 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7691 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7692 	return 0;
7693 #else
7694 	return 0;
7695 #endif
7696 }
7697 
7698 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7699 				 dma_addr_t mapping, u32 len, u32 flags,
7700 				 u32 mss, u32 vlan)
7701 {
7702 	txbd->addr_hi = ((u64) mapping >> 32);
7703 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7704 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7705 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7706 }
7707 
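/* [editor's note] tg3_tx_frag_set() emits one or more BDs for a single
 * DMA mapping.  When tp->dma_limit is set, an oversized fragment is
 * chopped into dma_limit-sized BDs; if the remainder would shrink to
 * 8 bytes or less, the current chunk is halved so no BD trips the
 * short-DMA erratum.  A true return means a hardware bug condition was
 * hit and the caller must fall back to the workaround path.
 */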
7708 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7709 			    dma_addr_t map, u32 len, u32 flags,
7710 			    u32 mss, u32 vlan)
7711 {
7712 	struct tg3 *tp = tnapi->tp;
7713 	bool hwbug = false;
7714 
7715 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7716 		hwbug = true;
7717 
7718 	if (tg3_4g_overflow_test(map, len))
7719 		hwbug = true;
7720 
7721 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7722 		hwbug = true;
7723 
7724 	if (tg3_40bit_overflow_test(tp, map, len))
7725 		hwbug = true;
7726 
7727 	if (tp->dma_limit) {
7728 		u32 prvidx = *entry;
7729 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7730 		while (len > tp->dma_limit && *budget) {
7731 			u32 frag_len = tp->dma_limit;
7732 			len -= tp->dma_limit;
7733 
7734 			/* Avoid the 8-byte DMA problem */
7735 			if (len <= 8) {
7736 				len += tp->dma_limit / 2;
7737 				frag_len = tp->dma_limit / 2;
7738 			}
7739 
7740 			tnapi->tx_buffers[*entry].fragmented = true;
7741 
7742 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7743 				      frag_len, tmp_flag, mss, vlan);
7744 			*budget -= 1;
7745 			prvidx = *entry;
7746 			*entry = NEXT_TX(*entry);
7747 
7748 			map += frag_len;
7749 		}
7750 
7751 		if (len) {
7752 			if (*budget) {
7753 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7754 					      len, flags, mss, vlan);
7755 				*budget -= 1;
7756 				*entry = NEXT_TX(*entry);
7757 			} else {
7758 				hwbug = true;
7759 				tnapi->tx_buffers[prvidx].fragmented = false;
7760 			}
7761 		}
7762 	} else {
7763 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7764 			      len, flags, mss, vlan);
7765 		*entry = NEXT_TX(*entry);
7766 	}
7767 
7768 	return hwbug;
7769 }
7770 
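/* [editor's note] tg3_tx_skb_unmap() undoes the transmit-time DMA
 * mappings: the linear head first, then each page fragment up to and
 * including index 'last', skipping the extra BDs that tg3_tx_frag_set()
 * marked as 'fragmented' along the way.
 */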
7771 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7772 {
7773 	int i;
7774 	struct sk_buff *skb;
7775 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7776 
7777 	skb = txb->skb;
7778 	txb->skb = NULL;
7779 
7780 	pci_unmap_single(tnapi->tp->pdev,
7781 			 dma_unmap_addr(txb, mapping),
7782 			 skb_headlen(skb),
7783 			 PCI_DMA_TODEVICE);
7784 
7785 	while (txb->fragmented) {
7786 		txb->fragmented = false;
7787 		entry = NEXT_TX(entry);
7788 		txb = &tnapi->tx_buffers[entry];
7789 	}
7790 
7791 	for (i = 0; i <= last; i++) {
7792 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7793 
7794 		entry = NEXT_TX(entry);
7795 		txb = &tnapi->tx_buffers[entry];
7796 
7797 		pci_unmap_page(tnapi->tp->pdev,
7798 			       dma_unmap_addr(txb, mapping),
7799 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7800 
7801 		while (txb->fragmented) {
7802 			txb->fragmented = false;
7803 			entry = NEXT_TX(entry);
7804 			txb = &tnapi->tx_buffers[entry];
7805 		}
7806 	}
7807 }
7808 
7809 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7810 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7811 				       struct sk_buff **pskb,
7812 				       u32 *entry, u32 *budget,
7813 				       u32 base_flags, u32 mss, u32 vlan)
7814 {
7815 	struct tg3 *tp = tnapi->tp;
7816 	struct sk_buff *new_skb, *skb = *pskb;
7817 	dma_addr_t new_addr = 0;
7818 	int ret = 0;
7819 
7820 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7821 		new_skb = skb_copy(skb, GFP_ATOMIC);
7822 	else {
7823 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7824 
7825 		new_skb = skb_copy_expand(skb,
7826 					  skb_headroom(skb) + more_headroom,
7827 					  skb_tailroom(skb), GFP_ATOMIC);
7828 	}
7829 
7830 	if (!new_skb) {
7831 		ret = -1;
7832 	} else {
7833 		/* New SKB is guaranteed to be linear. */
7834 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7835 					  PCI_DMA_TODEVICE);
7836 		/* Make sure the mapping succeeded */
7837 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7838 			dev_kfree_skb_any(new_skb);
7839 			ret = -1;
7840 		} else {
7841 			u32 save_entry = *entry;
7842 
7843 			base_flags |= TXD_FLAG_END;
7844 
7845 			tnapi->tx_buffers[*entry].skb = new_skb;
7846 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7847 					   mapping, new_addr);
7848 
7849 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7850 					    new_skb->len, base_flags,
7851 					    mss, vlan)) {
7852 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7853 				dev_kfree_skb_any(new_skb);
7854 				ret = -1;
7855 			}
7856 		}
7857 	}
7858 
7859 	dev_consume_skb_any(skb);
7860 	*pskb = new_skb;
7861 	return ret;
7862 }
7863 
7864 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7865 {
7866 	/* Check if we will never have enough descriptors,
7867 	 * as gso_segs can be more than the current ring size
7868 	 */
7869 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7870 }
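
/* [editor's note] The divide-by-3 above pairs with the gso_segs * 3
 * worst-case descriptor estimate used in tg3_tso_bug(): if even an
 * empty ring could not hold three BDs per segment, the GSO fallback
 * can never succeed, so the packet must be dropped instead.
 */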
7871 
7872 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7873 
7874 /* Use GSO to work around all TSO packets that meet HW bug conditions
7875  * indicated in tg3_tx_frag_set()
7876  */
7877 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7878 		       struct netdev_queue *txq, struct sk_buff *skb)
7879 {
7880 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7881 	struct sk_buff *segs, *seg, *next;
7882 
7883 	/* Estimate the number of fragments in the worst case */
7884 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7885 		netif_tx_stop_queue(txq);
7886 
7887 		/* netif_tx_stop_queue() must be done before checking
7888 		 * tx index in tg3_tx_avail() below, because in
7889 		 * tg3_tx(), we update tx index before checking for
7890 		 * netif_tx_queue_stopped().
7891 		 */
7892 		smp_mb();
7893 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7894 			return NETDEV_TX_BUSY;
7895 
7896 		netif_tx_wake_queue(txq);
7897 	}
7898 
7899 	segs = skb_gso_segment(skb, tp->dev->features &
7900 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7901 	if (IS_ERR(segs) || !segs) {
7902 		tnapi->tx_dropped++;
7903 		goto tg3_tso_bug_end;
7904 	}
7905 
7906 	skb_list_walk_safe(segs, seg, next) {
7907 		skb_mark_not_on_list(seg);
7908 		tg3_start_xmit(seg, tp->dev);
7909 	}
7910 
7911 tg3_tso_bug_end:
7912 	dev_consume_skb_any(skb);
7913 
7914 	return NETDEV_TX_OK;
7915 }
7916 
7917 /* hard_start_xmit for all devices */
7918 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7919 {
7920 	struct tg3 *tp = netdev_priv(dev);
7921 	u32 len, entry, base_flags, mss, vlan = 0;
7922 	u32 budget;
7923 	int i = -1, would_hit_hwbug;
7924 	dma_addr_t mapping;
7925 	struct tg3_napi *tnapi;
7926 	struct netdev_queue *txq;
7927 	unsigned int last;
7928 	struct iphdr *iph = NULL;
7929 	struct tcphdr *tcph = NULL;
7930 	__sum16 tcp_csum = 0, ip_csum = 0;
7931 	__be16 ip_tot_len = 0;
7932 
7933 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7934 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7935 	if (tg3_flag(tp, ENABLE_TSS))
7936 		tnapi++;
7937 
7938 	budget = tg3_tx_avail(tnapi);
7939 
7940 	/* We are running in BH disabled context with netif_tx_lock
7941 	 * and TX reclaim runs via tp->napi.poll inside of a software
7942 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7943 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7944 	 */
7945 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7946 		if (!netif_tx_queue_stopped(txq)) {
7947 			netif_tx_stop_queue(txq);
7948 
7949 			/* This is a hard error, log it. */
7950 			netdev_err(dev,
7951 				   "BUG! Tx Ring full when queue awake!\n");
7952 		}
7953 		return NETDEV_TX_BUSY;
7954 	}
7955 
7956 	entry = tnapi->tx_prod;
7957 	base_flags = 0;
7958 
7959 	mss = skb_shinfo(skb)->gso_size;
7960 	if (mss) {
7961 		u32 tcp_opt_len, hdr_len;
7962 
7963 		if (skb_cow_head(skb, 0))
7964 			goto drop;
7965 
7966 		iph = ip_hdr(skb);
7967 		tcp_opt_len = tcp_optlen(skb);
7968 
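		/* [editor's note] hdr_len below is the L3+L4 header length,
		 * i.e. everything between the Ethernet header and the TCP
		 * payload, which the TSO engine must replicate in front of
		 * every segment.
		 */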
7969 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7970 
7971 		/* HW/FW can not correctly segment packets that have been
7972 		 * vlan encapsulated.
7973 		 */
7974 		if (skb->protocol == htons(ETH_P_8021Q) ||
7975 		    skb->protocol == htons(ETH_P_8021AD)) {
7976 			if (tg3_tso_bug_gso_check(tnapi, skb))
7977 				return tg3_tso_bug(tp, tnapi, txq, skb);
7978 			goto drop;
7979 		}
7980 
7981 		if (!skb_is_gso_v6(skb)) {
7982 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7983 			    tg3_flag(tp, TSO_BUG)) {
7984 				if (tg3_tso_bug_gso_check(tnapi, skb))
7985 					return tg3_tso_bug(tp, tnapi, txq, skb);
7986 				goto drop;
7987 			}
7988 			ip_csum = iph->check;
7989 			ip_tot_len = iph->tot_len;
7990 			iph->check = 0;
7991 			iph->tot_len = htons(mss + hdr_len);
7992 		}
7993 
7994 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7995 			       TXD_FLAG_CPU_POST_DMA);
7996 
7997 		tcph = tcp_hdr(skb);
7998 		tcp_csum = tcph->check;
7999 
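		/* [editor's note] Chips with any HW_TSO_* flag compute the
		 * TCP pseudo-header checksum themselves, so tcph->check is
		 * simply cleared; older parts appear to need it pre-seeded
		 * with the pseudo-header sum (csum_tcpudp_magic() over a
		 * zero length) before segmentation is handed off.
		 */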
8000 		if (tg3_flag(tp, HW_TSO_1) ||
8001 		    tg3_flag(tp, HW_TSO_2) ||
8002 		    tg3_flag(tp, HW_TSO_3)) {
8003 			tcph->check = 0;
8004 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8005 		} else {
8006 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8007 							 0, IPPROTO_TCP, 0);
8008 		}
8009 
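		/* [editor's note] The header length is passed to the chip in
		 * spare descriptor bits, and the encoding differs by TSO
		 * generation: HW_TSO_3 scatters hdr_len bits across the mss
		 * field and base_flags, HW_TSO_2 shifts it into the upper
		 * mss bits, and the oldest parts encode only the IP/TCP
		 * option words as "tsflags".
		 */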
8010 		if (tg3_flag(tp, HW_TSO_3)) {
8011 			mss |= (hdr_len & 0xc) << 12;
8012 			if (hdr_len & 0x10)
8013 				base_flags |= 0x00000010;
8014 			base_flags |= (hdr_len & 0x3e0) << 5;
8015 		} else if (tg3_flag(tp, HW_TSO_2))
8016 			mss |= hdr_len << 9;
8017 		else if (tg3_flag(tp, HW_TSO_1) ||
8018 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8019 			if (tcp_opt_len || iph->ihl > 5) {
8020 				int tsflags;
8021 
8022 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8023 				mss |= (tsflags << 11);
8024 			}
8025 		} else {
8026 			if (tcp_opt_len || iph->ihl > 5) {
8027 				int tsflags;
8028 
8029 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8030 				base_flags |= tsflags << 12;
8031 			}
8032 		}
8033 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8034 		/* HW/FW can not correctly checksum packets that have been
8035 		 * vlan encapsulated.
8036 		 */
8037 		if (skb->protocol == htons(ETH_P_8021Q) ||
8038 		    skb->protocol == htons(ETH_P_8021AD)) {
8039 			if (skb_checksum_help(skb))
8040 				goto drop;
8041 		} else  {
8042 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8043 		}
8044 	}
8045 
8046 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8047 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8048 		base_flags |= TXD_FLAG_JMB_PKT;
8049 
8050 	if (skb_vlan_tag_present(skb)) {
8051 		base_flags |= TXD_FLAG_VLAN;
8052 		vlan = skb_vlan_tag_get(skb);
8053 	}
8054 
8055 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8056 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8057 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8058 		base_flags |= TXD_FLAG_HWTSTAMP;
8059 	}
8060 
8061 	len = skb_headlen(skb);
8062 
8063 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8064 	if (pci_dma_mapping_error(tp->pdev, mapping))
8065 		goto drop;
8066 
8067 
8068 	tnapi->tx_buffers[entry].skb = skb;
8069 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8070 
8071 	would_hit_hwbug = 0;
8072 
8073 	if (tg3_flag(tp, 5701_DMA_BUG))
8074 		would_hit_hwbug = 1;
8075 
8076 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8077 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8078 			    mss, vlan)) {
8079 		would_hit_hwbug = 1;
8080 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8081 		u32 tmp_mss = mss;
8082 
8083 		if (!tg3_flag(tp, HW_TSO_1) &&
8084 		    !tg3_flag(tp, HW_TSO_2) &&
8085 		    !tg3_flag(tp, HW_TSO_3))
8086 			tmp_mss = 0;
8087 
8088 		/* Now loop through additional data
8089 		 * fragments, and queue them.
8090 		 */
8091 		last = skb_shinfo(skb)->nr_frags - 1;
8092 		for (i = 0; i <= last; i++) {
8093 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8094 
8095 			len = skb_frag_size(frag);
8096 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8097 						   len, DMA_TO_DEVICE);
8098 
8099 			tnapi->tx_buffers[entry].skb = NULL;
8100 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8101 					   mapping);
8102 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8103 				goto dma_error;
8104 
8105 			if (!budget ||
8106 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8107 					    len, base_flags |
8108 					    ((i == last) ? TXD_FLAG_END : 0),
8109 					    tmp_mss, vlan)) {
8110 				would_hit_hwbug = 1;
8111 				break;
8112 			}
8113 		}
8114 	}
8115 
8116 	if (would_hit_hwbug) {
8117 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8118 
8119 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8120 			/* If it's a TSO packet, do GSO instead of
8121 			 * allocating and copying to a large linear SKB
8122 			 */
8123 			if (ip_tot_len) {
8124 				iph->check = ip_csum;
8125 				iph->tot_len = ip_tot_len;
8126 			}
8127 			tcph->check = tcp_csum;
8128 			return tg3_tso_bug(tp, tnapi, txq, skb);
8129 		}
8130 
8131 		/* If the workaround fails due to memory/mapping
8132 		 * failure, silently drop this packet.
8133 		 */
8134 		entry = tnapi->tx_prod;
8135 		budget = tg3_tx_avail(tnapi);
8136 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8137 						base_flags, mss, vlan))
8138 			goto drop_nofree;
8139 	}
8140 
8141 	skb_tx_timestamp(skb);
8142 	netdev_tx_sent_queue(txq, skb->len);
8143 
8144 	/* Sync BD data before updating mailbox */
8145 	wmb();
8146 
8147 	tnapi->tx_prod = entry;
8148 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8149 		netif_tx_stop_queue(txq);
8150 
8151 		/* netif_tx_stop_queue() must be done before checking
8152 		 * tx index in tg3_tx_avail() below, because in
8153 		 * tg3_tx(), we update tx index before checking for
8154 		 * netif_tx_queue_stopped().
8155 		 */
8156 		smp_mb();
8157 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8158 			netif_tx_wake_queue(txq);
8159 	}
8160 
8161 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8162 		/* Packets are ready, update Tx producer idx on card. */
8163 		tw32_tx_mbox(tnapi->prodmbox, entry);
8164 	}
8165 
8166 	return NETDEV_TX_OK;
8167 
8168 dma_error:
8169 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8170 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8171 drop:
8172 	dev_kfree_skb_any(skb);
8173 drop_nofree:
8174 	tnapi->tx_dropped++;
8175 	return NETDEV_TX_OK;
8176 }
8177 
8178 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8179 {
8180 	if (enable) {
8181 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8182 				  MAC_MODE_PORT_MODE_MASK);
8183 
8184 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8185 
8186 		if (!tg3_flag(tp, 5705_PLUS))
8187 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8188 
8189 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8190 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8191 		else
8192 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8193 	} else {
8194 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8195 
8196 		if (tg3_flag(tp, 5705_PLUS) ||
8197 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8198 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8199 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8200 	}
8201 
8202 	tw32(MAC_MODE, tp->mac_mode);
8203 	udelay(40);
8204 }
8205 
8206 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8207 {
8208 	u32 val, bmcr, mac_mode, ptest = 0;
8209 
8210 	tg3_phy_toggle_apd(tp, false);
8211 	tg3_phy_toggle_automdix(tp, false);
8212 
8213 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8214 		return -EIO;
8215 
8216 	bmcr = BMCR_FULLDPLX;
8217 	switch (speed) {
8218 	case SPEED_10:
8219 		break;
8220 	case SPEED_100:
8221 		bmcr |= BMCR_SPEED100;
8222 		break;
8223 	case SPEED_1000:
8224 	default:
8225 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8226 			speed = SPEED_100;
8227 			bmcr |= BMCR_SPEED100;
8228 		} else {
8229 			speed = SPEED_1000;
8230 			bmcr |= BMCR_SPEED1000;
8231 		}
8232 	}
8233 
8234 	if (extlpbk) {
8235 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8236 			tg3_readphy(tp, MII_CTRL1000, &val);
8237 			val |= CTL1000_AS_MASTER |
8238 			       CTL1000_ENABLE_MASTER;
8239 			tg3_writephy(tp, MII_CTRL1000, val);
8240 		} else {
8241 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8242 				MII_TG3_FET_PTEST_TRIM_2;
8243 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8244 		}
8245 	} else
8246 		bmcr |= BMCR_LOOPBACK;
8247 
8248 	tg3_writephy(tp, MII_BMCR, bmcr);
8249 
8250 	/* The write needs to be flushed for the FETs */
8251 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8252 		tg3_readphy(tp, MII_BMCR, &bmcr);
8253 
8254 	udelay(40);
8255 
8256 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8257 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8258 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8259 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8260 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8261 
8262 		/* The write needs to be flushed for the AC131 */
8263 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8264 	}
8265 
8266 	/* Reset to prevent losing 1st rx packet intermittently */
8267 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8268 	    tg3_flag(tp, 5780_CLASS)) {
8269 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8270 		udelay(10);
8271 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8272 	}
8273 
8274 	mac_mode = tp->mac_mode &
8275 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8276 	if (speed == SPEED_1000)
8277 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8278 	else
8279 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8280 
8281 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8282 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8283 
8284 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8285 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8286 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8287 			mac_mode |= MAC_MODE_LINK_POLARITY;
8288 
8289 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8290 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8291 	}
8292 
8293 	tw32(MAC_MODE, mac_mode);
8294 	udelay(40);
8295 
8296 	return 0;
8297 }
8298 
8299 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8300 {
8301 	struct tg3 *tp = netdev_priv(dev);
8302 
8303 	if (features & NETIF_F_LOOPBACK) {
8304 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8305 			return;
8306 
8307 		spin_lock_bh(&tp->lock);
8308 		tg3_mac_loopback(tp, true);
8309 		netif_carrier_on(tp->dev);
8310 		spin_unlock_bh(&tp->lock);
8311 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8312 	} else {
8313 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8314 			return;
8315 
8316 		spin_lock_bh(&tp->lock);
8317 		tg3_mac_loopback(tp, false);
8318 		/* Force link status check */
8319 		tg3_setup_phy(tp, true);
8320 		spin_unlock_bh(&tp->lock);
8321 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8322 	}
8323 }
8324 
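/* [editor's note] 5780-class chips apparently cannot use TSO together
 * with jumbo frames, so tg3_fix_features() masks every TSO feature bit
 * whenever the MTU exceeds the standard Ethernet payload size.
 */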
8325 static netdev_features_t tg3_fix_features(struct net_device *dev,
8326 	netdev_features_t features)
8327 {
8328 	struct tg3 *tp = netdev_priv(dev);
8329 
8330 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8331 		features &= ~NETIF_F_ALL_TSO;
8332 
8333 	return features;
8334 }
8335 
8336 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8337 {
8338 	netdev_features_t changed = dev->features ^ features;
8339 
8340 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8341 		tg3_set_loopback(dev, features);
8342 
8343 	return 0;
8344 }
8345 
8346 static void tg3_rx_prodring_free(struct tg3 *tp,
8347 				 struct tg3_rx_prodring_set *tpr)
8348 {
8349 	int i;
8350 
8351 	if (tpr != &tp->napi[0].prodring) {
8352 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8353 		     i = (i + 1) & tp->rx_std_ring_mask)
8354 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8355 					tp->rx_pkt_map_sz);
8356 
8357 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8358 			for (i = tpr->rx_jmb_cons_idx;
8359 			     i != tpr->rx_jmb_prod_idx;
8360 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8361 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8362 						TG3_RX_JMB_MAP_SZ);
8363 			}
8364 		}
8365 
8366 		return;
8367 	}
8368 
8369 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8370 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8371 				tp->rx_pkt_map_sz);
8372 
8373 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8374 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8375 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8376 					TG3_RX_JMB_MAP_SZ);
8377 	}
8378 }
8379 
8380 /* Initialize rx rings for packet processing.
8381  *
8382  * The chip has been shut down and the driver detached from
8383  * the networking, so no interrupts or new tx packets will
8384  * end up in the driver.  tp->{tx,}lock are held and thus
8385  * we may not sleep.
8386  */
8387 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8388 				 struct tg3_rx_prodring_set *tpr)
8389 {
8390 	u32 i, rx_pkt_dma_sz;
8391 
8392 	tpr->rx_std_cons_idx = 0;
8393 	tpr->rx_std_prod_idx = 0;
8394 	tpr->rx_jmb_cons_idx = 0;
8395 	tpr->rx_jmb_prod_idx = 0;
8396 
8397 	if (tpr != &tp->napi[0].prodring) {
8398 		memset(&tpr->rx_std_buffers[0], 0,
8399 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8400 		if (tpr->rx_jmb_buffers)
8401 			memset(&tpr->rx_jmb_buffers[0], 0,
8402 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8403 		goto done;
8404 	}
8405 
8406 	/* Zero out all descriptors. */
8407 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8408 
8409 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8410 	if (tg3_flag(tp, 5780_CLASS) &&
8411 	    tp->dev->mtu > ETH_DATA_LEN)
8412 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8413 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8414 
8415 	/* Initialize invariants of the rings; we only set this
8416 	 * stuff once.  This works because the card does not
8417 	 * write into the rx buffer posting rings.
8418 	 */
8419 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8420 		struct tg3_rx_buffer_desc *rxd;
8421 
8422 		rxd = &tpr->rx_std[i];
8423 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8424 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8425 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8426 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8427 	}
8428 
8429 	/* Now allocate fresh SKBs for each rx ring. */
8430 	for (i = 0; i < tp->rx_pending; i++) {
8431 		unsigned int frag_size;
8432 
8433 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8434 				      &frag_size) < 0) {
8435 			netdev_warn(tp->dev,
8436 				    "Using a smaller RX standard ring. Only "
8437 				    "%d out of %d buffers were allocated "
8438 				    "successfully\n", i, tp->rx_pending);
8439 			if (i == 0)
8440 				goto initfail;
8441 			tp->rx_pending = i;
8442 			break;
8443 		}
8444 	}
8445 
8446 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8447 		goto done;
8448 
8449 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8450 
8451 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8452 		goto done;
8453 
8454 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8455 		struct tg3_rx_buffer_desc *rxd;
8456 
8457 		rxd = &tpr->rx_jmb[i].std;
8458 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8459 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8460 				  RXD_FLAG_JUMBO;
8461 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8462 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8463 	}
8464 
8465 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8466 		unsigned int frag_size;
8467 
8468 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8469 				      &frag_size) < 0) {
8470 			netdev_warn(tp->dev,
8471 				    "Using a smaller RX jumbo ring. Only %d "
8472 				    "out of %d buffers were allocated "
8473 				    "successfully\n", i, tp->rx_jumbo_pending);
8474 			if (i == 0)
8475 				goto initfail;
8476 			tp->rx_jumbo_pending = i;
8477 			break;
8478 		}
8479 	}
8480 
8481 done:
8482 	return 0;
8483 
8484 initfail:
8485 	tg3_rx_prodring_free(tp, tpr);
8486 	return -ENOMEM;
8487 }
8488 
8489 static void tg3_rx_prodring_fini(struct tg3 *tp,
8490 				 struct tg3_rx_prodring_set *tpr)
8491 {
8492 	kfree(tpr->rx_std_buffers);
8493 	tpr->rx_std_buffers = NULL;
8494 	kfree(tpr->rx_jmb_buffers);
8495 	tpr->rx_jmb_buffers = NULL;
8496 	if (tpr->rx_std) {
8497 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8498 				  tpr->rx_std, tpr->rx_std_mapping);
8499 		tpr->rx_std = NULL;
8500 	}
8501 	if (tpr->rx_jmb) {
8502 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8503 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8504 		tpr->rx_jmb = NULL;
8505 	}
8506 }
8507 
8508 static int tg3_rx_prodring_init(struct tg3 *tp,
8509 				struct tg3_rx_prodring_set *tpr)
8510 {
8511 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8512 				      GFP_KERNEL);
8513 	if (!tpr->rx_std_buffers)
8514 		return -ENOMEM;
8515 
8516 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8517 					 TG3_RX_STD_RING_BYTES(tp),
8518 					 &tpr->rx_std_mapping,
8519 					 GFP_KERNEL);
8520 	if (!tpr->rx_std)
8521 		goto err_out;
8522 
8523 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8524 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8525 					      GFP_KERNEL);
8526 		if (!tpr->rx_jmb_buffers)
8527 			goto err_out;
8528 
8529 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8530 						 TG3_RX_JMB_RING_BYTES(tp),
8531 						 &tpr->rx_jmb_mapping,
8532 						 GFP_KERNEL);
8533 		if (!tpr->rx_jmb)
8534 			goto err_out;
8535 	}
8536 
8537 	return 0;
8538 
8539 err_out:
8540 	tg3_rx_prodring_fini(tp, tpr);
8541 	return -ENOMEM;
8542 }
8543 
8544 /* Free up pending packets in all rx/tx rings.
8545  *
8546  * The chip has been shut down and the driver detached from
8547  * the networking, so no interrupts or new tx packets will
8548  * end up in the driver.  tp->{tx,}lock is not held and we are not
8549  * in an interrupt context and thus may sleep.
8550  */
8551 static void tg3_free_rings(struct tg3 *tp)
8552 {
8553 	int i, j;
8554 
8555 	for (j = 0; j < tp->irq_cnt; j++) {
8556 		struct tg3_napi *tnapi = &tp->napi[j];
8557 
8558 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8559 
8560 		if (!tnapi->tx_buffers)
8561 			continue;
8562 
8563 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8564 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8565 
8566 			if (!skb)
8567 				continue;
8568 
8569 			tg3_tx_skb_unmap(tnapi, i,
8570 					 skb_shinfo(skb)->nr_frags - 1);
8571 
8572 			dev_consume_skb_any(skb);
8573 		}
8574 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8575 	}
8576 }
8577 
8578 /* Initialize tx/rx rings for packet processing.
8579  *
8580  * The chip has been shut down and the driver detached from
8581  * the networking, so no interrupts or new tx packets will
8582  * end up in the driver.  tp->{tx,}lock are held and thus
8583  * we may not sleep.
8584  */
8585 static int tg3_init_rings(struct tg3 *tp)
8586 {
8587 	int i;
8588 
8589 	/* Free up all the SKBs. */
8590 	tg3_free_rings(tp);
8591 
8592 	for (i = 0; i < tp->irq_cnt; i++) {
8593 		struct tg3_napi *tnapi = &tp->napi[i];
8594 
8595 		tnapi->last_tag = 0;
8596 		tnapi->last_irq_tag = 0;
8597 		tnapi->hw_status->status = 0;
8598 		tnapi->hw_status->status_tag = 0;
8599 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8600 
8601 		tnapi->tx_prod = 0;
8602 		tnapi->tx_cons = 0;
8603 		if (tnapi->tx_ring)
8604 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8605 
8606 		tnapi->rx_rcb_ptr = 0;
8607 		if (tnapi->rx_rcb)
8608 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8609 
8610 		if (tnapi->prodring.rx_std &&
8611 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8612 			tg3_free_rings(tp);
8613 			return -ENOMEM;
8614 		}
8615 	}
8616 
8617 	return 0;
8618 }
8619 
8620 static void tg3_mem_tx_release(struct tg3 *tp)
8621 {
8622 	int i;
8623 
8624 	for (i = 0; i < tp->irq_max; i++) {
8625 		struct tg3_napi *tnapi = &tp->napi[i];
8626 
8627 		if (tnapi->tx_ring) {
8628 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8629 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8630 			tnapi->tx_ring = NULL;
8631 		}
8632 
8633 		kfree(tnapi->tx_buffers);
8634 		tnapi->tx_buffers = NULL;
8635 	}
8636 }
8637 
8638 static int tg3_mem_tx_acquire(struct tg3 *tp)
8639 {
8640 	int i;
8641 	struct tg3_napi *tnapi = &tp->napi[0];
8642 
8643 	/* If multivector TSS is enabled, vector 0 does not handle
8644 	 * tx interrupts.  Don't allocate any resources for it.
8645 	 */
8646 	if (tg3_flag(tp, ENABLE_TSS))
8647 		tnapi++;
8648 
8649 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8650 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8651 					    sizeof(struct tg3_tx_ring_info),
8652 					    GFP_KERNEL);
8653 		if (!tnapi->tx_buffers)
8654 			goto err_out;
8655 
8656 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8657 						    TG3_TX_RING_BYTES,
8658 						    &tnapi->tx_desc_mapping,
8659 						    GFP_KERNEL);
8660 		if (!tnapi->tx_ring)
8661 			goto err_out;
8662 	}
8663 
8664 	return 0;
8665 
8666 err_out:
8667 	tg3_mem_tx_release(tp);
8668 	return -ENOMEM;
8669 }
8670 
8671 static void tg3_mem_rx_release(struct tg3 *tp)
8672 {
8673 	int i;
8674 
8675 	for (i = 0; i < tp->irq_max; i++) {
8676 		struct tg3_napi *tnapi = &tp->napi[i];
8677 
8678 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8679 
8680 		if (!tnapi->rx_rcb)
8681 			continue;
8682 
8683 		dma_free_coherent(&tp->pdev->dev,
8684 				  TG3_RX_RCB_RING_BYTES(tp),
8685 				  tnapi->rx_rcb,
8686 				  tnapi->rx_rcb_mapping);
8687 		tnapi->rx_rcb = NULL;
8688 	}
8689 }
8690 
8691 static int tg3_mem_rx_acquire(struct tg3 *tp)
8692 {
8693 	unsigned int i, limit;
8694 
8695 	limit = tp->rxq_cnt;
8696 
8697 	/* If RSS is enabled, we need a (dummy) producer ring
8698 	 * set on vector zero.  This is the true hw prodring.
8699 	 */
8700 	if (tg3_flag(tp, ENABLE_RSS))
8701 		limit++;
8702 
8703 	for (i = 0; i < limit; i++) {
8704 		struct tg3_napi *tnapi = &tp->napi[i];
8705 
8706 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8707 			goto err_out;
8708 
8709 		/* If multivector RSS is enabled, vector 0
8710 		 * does not handle rx or tx interrupts.
8711 		 * Don't allocate any resources for it.
8712 		 */
8713 		if (!i && tg3_flag(tp, ENABLE_RSS))
8714 			continue;
8715 
8716 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8717 						   TG3_RX_RCB_RING_BYTES(tp),
8718 						   &tnapi->rx_rcb_mapping,
8719 						   GFP_KERNEL);
8720 		if (!tnapi->rx_rcb)
8721 			goto err_out;
8722 	}
8723 
8724 	return 0;
8725 
8726 err_out:
8727 	tg3_mem_rx_release(tp);
8728 	return -ENOMEM;
8729 }
8730 
8731 /*
8732  * Must not be invoked with interrupt sources disabled and
8733  * the hardware shut down.
8734  */
8735 static void tg3_free_consistent(struct tg3 *tp)
8736 {
8737 	int i;
8738 
8739 	for (i = 0; i < tp->irq_cnt; i++) {
8740 		struct tg3_napi *tnapi = &tp->napi[i];
8741 
8742 		if (tnapi->hw_status) {
8743 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8744 					  tnapi->hw_status,
8745 					  tnapi->status_mapping);
8746 			tnapi->hw_status = NULL;
8747 		}
8748 	}
8749 
8750 	tg3_mem_rx_release(tp);
8751 	tg3_mem_tx_release(tp);
8752 
8753 	/* tp->hw_stats can be referenced safely:
8754 	 *     1. under rtnl_lock
8755 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8756 	 */
8757 	if (tp->hw_stats) {
8758 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8759 				  tp->hw_stats, tp->stats_mapping);
8760 		tp->hw_stats = NULL;
8761 	}
8762 }
8763 
8764 /*
8765  * Must not be invoked with interrupt sources disabled and
8766  * the hardware shut down.  Can sleep.
8767  */
8768 static int tg3_alloc_consistent(struct tg3 *tp)
8769 {
8770 	int i;
8771 
8772 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8773 					  sizeof(struct tg3_hw_stats),
8774 					  &tp->stats_mapping, GFP_KERNEL);
8775 	if (!tp->hw_stats)
8776 		goto err_out;
8777 
8778 	for (i = 0; i < tp->irq_cnt; i++) {
8779 		struct tg3_napi *tnapi = &tp->napi[i];
8780 		struct tg3_hw_status *sblk;
8781 
8782 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8783 						      TG3_HW_STATUS_SIZE,
8784 						      &tnapi->status_mapping,
8785 						      GFP_KERNEL);
8786 		if (!tnapi->hw_status)
8787 			goto err_out;
8788 
8789 		sblk = tnapi->hw_status;
8790 
8791 		if (tg3_flag(tp, ENABLE_RSS)) {
8792 			u16 *prodptr = NULL;
8793 
8794 			/*
8795 			 * When RSS is enabled, the status block format changes
8796 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8797 			 * and "rx_mini_consumer" members get mapped to the
8798 			 * other three rx return ring producer indexes.
8799 			 */
8800 			switch (i) {
8801 			case 1:
8802 				prodptr = &sblk->idx[0].rx_producer;
8803 				break;
8804 			case 2:
8805 				prodptr = &sblk->rx_jumbo_consumer;
8806 				break;
8807 			case 3:
8808 				prodptr = &sblk->reserved;
8809 				break;
8810 			case 4:
8811 				prodptr = &sblk->rx_mini_consumer;
8812 				break;
8813 			}
8814 			tnapi->rx_rcb_prod_idx = prodptr;
8815 		} else {
8816 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8817 		}
8818 	}
8819 
8820 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8821 		goto err_out;
8822 
8823 	return 0;
8824 
8825 err_out:
8826 	tg3_free_consistent(tp);
8827 	return -ENOMEM;
8828 }
8829 
8830 #define MAX_WAIT_CNT 1000
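
/* [editor's note] With udelay(100) per iteration, MAX_WAIT_CNT bounds
 * each tg3_stop_block() poll loop at roughly 100 ms before giving up.
 */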
8831 
8832 /* To stop a block, clear the enable bit and poll till it
8833  * clears.  tp->lock is held.
8834  */
8835 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8836 {
8837 	unsigned int i;
8838 	u32 val;
8839 
8840 	if (tg3_flag(tp, 5705_PLUS)) {
8841 		switch (ofs) {
8842 		case RCVLSC_MODE:
8843 		case DMAC_MODE:
8844 		case MBFREE_MODE:
8845 		case BUFMGR_MODE:
8846 		case MEMARB_MODE:
8847 			/* We can't enable/disable these bits of the
8848 			 * 5705/5750; just say success.
8849 			 */
8850 			return 0;
8851 
8852 		default:
8853 			break;
8854 		}
8855 	}
8856 
8857 	val = tr32(ofs);
8858 	val &= ~enable_bit;
8859 	tw32_f(ofs, val);
8860 
8861 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8862 		if (pci_channel_offline(tp->pdev)) {
8863 			dev_err(&tp->pdev->dev,
8864 				"tg3_stop_block device offline, "
8865 				"ofs=%lx enable_bit=%x\n",
8866 				ofs, enable_bit);
8867 			return -ENODEV;
8868 		}
8869 
8870 		udelay(100);
8871 		val = tr32(ofs);
8872 		if ((val & enable_bit) == 0)
8873 			break;
8874 	}
8875 
8876 	if (i == MAX_WAIT_CNT && !silent) {
8877 		dev_err(&tp->pdev->dev,
8878 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8879 			ofs, enable_bit);
8880 		return -ENODEV;
8881 	}
8882 
8883 	return 0;
8884 }
8885 
8886 /* tp->lock is held. */
8887 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8888 {
8889 	int i, err;
8890 
8891 	tg3_disable_ints(tp);
8892 
8893 	if (pci_channel_offline(tp->pdev)) {
8894 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8895 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8896 		err = -ENODEV;
8897 		goto err_no_dev;
8898 	}
8899 
8900 	tp->rx_mode &= ~RX_MODE_ENABLE;
8901 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8902 	udelay(10);
8903 
8904 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8905 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8906 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8907 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8908 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8909 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8910 
8911 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8912 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8913 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8914 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8915 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8916 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8917 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8918 
8919 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8920 	tw32_f(MAC_MODE, tp->mac_mode);
8921 	udelay(40);
8922 
8923 	tp->tx_mode &= ~TX_MODE_ENABLE;
8924 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8925 
8926 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8927 		udelay(100);
8928 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8929 			break;
8930 	}
8931 	if (i >= MAX_WAIT_CNT) {
8932 		dev_err(&tp->pdev->dev,
8933 			"%s timed out, TX_MODE_ENABLE will not clear "
8934 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8935 		err |= -ENODEV;
8936 	}
8937 
8938 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8939 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8940 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8941 
8942 	tw32(FTQ_RESET, 0xffffffff);
8943 	tw32(FTQ_RESET, 0x00000000);
8944 
8945 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8946 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8947 
8948 err_no_dev:
8949 	for (i = 0; i < tp->irq_cnt; i++) {
8950 		struct tg3_napi *tnapi = &tp->napi[i];
8951 		if (tnapi->hw_status)
8952 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8953 	}
8954 
8955 	return err;
8956 }
8957 
8958 /* Save PCI command register before chip reset */
8959 static void tg3_save_pci_state(struct tg3 *tp)
8960 {
8961 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8962 }
8963 
8964 /* Restore PCI state after chip reset */
8965 static void tg3_restore_pci_state(struct tg3 *tp)
8966 {
8967 	u32 val;
8968 
8969 	/* Re-enable indirect register accesses. */
8970 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8971 			       tp->misc_host_ctrl);
8972 
8973 	/* Set MAX PCI retry to zero. */
8974 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8975 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8976 	    tg3_flag(tp, PCIX_MODE))
8977 		val |= PCISTATE_RETRY_SAME_DMA;
8978 	/* Allow reads and writes to the APE register and memory space. */
8979 	if (tg3_flag(tp, ENABLE_APE))
8980 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8981 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8982 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8983 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8984 
8985 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8986 
8987 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8988 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8989 				      tp->pci_cacheline_sz);
8990 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8991 				      tp->pci_lat_timer);
8992 	}
8993 
8994 	/* Make sure PCI-X relaxed ordering bit is clear. */
8995 	if (tg3_flag(tp, PCIX_MODE)) {
8996 		u16 pcix_cmd;
8997 
8998 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8999 				     &pcix_cmd);
9000 		pcix_cmd &= ~PCI_X_CMD_ERO;
9001 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9002 				      pcix_cmd);
9003 	}
9004 
9005 	if (tg3_flag(tp, 5780_CLASS)) {
9006 
9007 		/* A chip reset on the 5780 will clear the MSI enable bit,
9008 		 * so we need to restore it.
9009 		 */
9010 		if (tg3_flag(tp, USING_MSI)) {
9011 			u16 ctrl;
9012 
9013 			pci_read_config_word(tp->pdev,
9014 					     tp->msi_cap + PCI_MSI_FLAGS,
9015 					     &ctrl);
9016 			pci_write_config_word(tp->pdev,
9017 					      tp->msi_cap + PCI_MSI_FLAGS,
9018 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9019 			val = tr32(MSGINT_MODE);
9020 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9021 		}
9022 	}
9023 }
9024 
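/* tg3_override_clk() and tg3_restore_clk() bracket the chip reset:
 * the MAC clock override is asserted so that bootcode completes
 * promptly even in link-aware mode, then dropped again afterwards
 * (see the call sites in tg3_chip_reset() below).
 */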
9025 static void tg3_override_clk(struct tg3 *tp)
9026 {
9027 	u32 val;
9028 
9029 	switch (tg3_asic_rev(tp)) {
9030 	case ASIC_REV_5717:
9031 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9032 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9033 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9034 		break;
9035 
9036 	case ASIC_REV_5719:
9037 	case ASIC_REV_5720:
9038 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9039 		break;
9040 
9041 	default:
9042 		return;
9043 	}
9044 }
9045 
9046 static void tg3_restore_clk(struct tg3 *tp)
9047 {
9048 	u32 val;
9049 
9050 	switch (tg3_asic_rev(tp)) {
9051 	case ASIC_REV_5717:
9052 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9053 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9054 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9055 		break;
9056 
9057 	case ASIC_REV_5719:
9058 	case ASIC_REV_5720:
9059 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9060 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9061 		break;
9062 
9063 	default:
9064 		return;
9065 	}
9066 }
9067 
9068 /* tp->lock is held. */
9069 static int tg3_chip_reset(struct tg3 *tp)
9070 	__releases(tp->lock)
9071 	__acquires(tp->lock)
9072 {
9073 	u32 val;
9074 	void (*write_op)(struct tg3 *, u32, u32);
9075 	int i, err;
9076 
9077 	if (!pci_device_is_present(tp->pdev))
9078 		return -ENODEV;
9079 
9080 	tg3_nvram_lock(tp);
9081 
9082 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9083 
9084 	/* No matching tg3_nvram_unlock() after this because
9085 	 * the chip reset below will undo the nvram lock.
9086 	 */
9087 	tp->nvram_lock_cnt = 0;
9088 
9089 	/* GRC_MISC_CFG core clock reset will clear the memory
9090 	 * enable bit in PCI register 4 and the MSI enable bit
9091 	 * on some chips, so we save relevant registers here.
9092 	 */
9093 	tg3_save_pci_state(tp);
9094 
9095 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9096 	    tg3_flag(tp, 5755_PLUS))
9097 		tw32(GRC_FASTBOOT_PC, 0);
9098 
9099 	/*
9100 	 * We must avoid the readl() that normally takes place.
9101 	 * It locks up machines, causes machine checks, and does
9102 	 * other fun things.  So, temporarily disable the 5701
9103 	 * hardware workaround while we do the reset.
9104 	 */
9105 	write_op = tp->write32;
9106 	if (write_op == tg3_write_flush_reg32)
9107 		tp->write32 = tg3_write32;
9108 
9109 	/* Prevent the irq handler from reading or writing PCI registers
9110 	 * during chip reset when the memory enable bit in the PCI command
9111 	 * register may be cleared.  The chip does not generate interrupt
9112 	 * at this time, but the irq handler may still be called due to irq
9113 	 * sharing or irqpoll.
9114 	 */
9115 	tg3_flag_set(tp, CHIP_RESETTING);
9116 	for (i = 0; i < tp->irq_cnt; i++) {
9117 		struct tg3_napi *tnapi = &tp->napi[i];
9118 		if (tnapi->hw_status) {
9119 			tnapi->hw_status->status = 0;
9120 			tnapi->hw_status->status_tag = 0;
9121 		}
9122 		tnapi->last_tag = 0;
9123 		tnapi->last_irq_tag = 0;
9124 	}
9125 	smp_mb();
9126 
9127 	tg3_full_unlock(tp);
9128 
9129 	for (i = 0; i < tp->irq_cnt; i++)
9130 		synchronize_irq(tp->napi[i].irq_vec);
9131 
9132 	tg3_full_lock(tp, 0);
9133 
9134 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9135 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9136 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9137 	}
9138 
9139 	/* do the reset */
9140 	val = GRC_MISC_CFG_CORECLK_RESET;
9141 
9142 	if (tg3_flag(tp, PCI_EXPRESS)) {
9143 		/* Force PCIe 1.0a mode */
9144 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9145 		    !tg3_flag(tp, 57765_PLUS) &&
9146 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9147 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9148 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9149 
9150 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9151 			tw32(GRC_MISC_CFG, (1 << 29));
9152 			val |= (1 << 29);
9153 		}
9154 	}
9155 
9156 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9157 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9158 		tw32(GRC_VCPU_EXT_CTRL,
9159 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9160 	}
9161 
9162 	/* Set the clock to the highest frequency to avoid timeouts. With link
9163 	 * aware mode, the clock speed may be slow and the bootcode may not
9164 	 * complete within the expected time. Override the clock to allow the
9165 	 * bootcode to finish sooner and then restore it.
9166 	 */
9167 	tg3_override_clk(tp);
9168 
9169 	/* Manage gphy power for all CPMU absent PCIe devices. */
9170 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9171 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9172 
9173 	tw32(GRC_MISC_CFG, val);
9174 
9175 	/* restore 5701 hardware bug workaround write method */
9176 	tp->write32 = write_op;
9177 
9178 	/* Unfortunately, we have to delay before the PCI read back.
9179 	 * Some 575X chips will not even respond to a PCI cfg access
9180 	 * when the reset command is given to the chip.
9181 	 *
9182 	 * How do these hardware designers expect things to work
9183 	 * properly if the PCI write is posted for a long period
9184 	 * of time?  It is always necessary to have some method by
9185 	 * which a register read back can occur to push the write
9186 	 * out which does the reset.
9187 	 *
9188 	 * For most tg3 variants the trick below was working.
9189 	 * Ho hum...
9190 	 */
9191 	udelay(120);
9192 
9193 	/* Flush PCI posted writes.  The normal MMIO registers
9194 	 * are inaccessible at this time so this is the only
9195 	 * way to do this reliably (actually, this is no longer
9196 	 * the case, see above).  I tried to use indirect
9197 	 * register read/write but this upset some 5701 variants.
9198 	 */
9199 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9200 
9201 	udelay(120);
9202 
9203 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9204 		u16 val16;
9205 
9206 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9207 			int j;
9208 			u32 cfg_val;
9209 
9210 			/* Wait for link training to complete.  */
9211 			for (j = 0; j < 5000; j++)
9212 				udelay(100);
9213 
9214 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9215 			pci_write_config_dword(tp->pdev, 0xc4,
9216 					       cfg_val | (1 << 15));
9217 		}
9218 
9219 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9220 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9221 		/*
9222 		 * Older PCIe devices only support the 128 byte
9223 		 * MPS setting.  Enforce the restriction.
9224 		 */
9225 		if (!tg3_flag(tp, CPMU_PRESENT))
9226 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9227 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9228 
9229 		/* Clear error status */
9230 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9231 				      PCI_EXP_DEVSTA_CED |
9232 				      PCI_EXP_DEVSTA_NFED |
9233 				      PCI_EXP_DEVSTA_FED |
9234 				      PCI_EXP_DEVSTA_URD);
9235 	}
9236 
9237 	tg3_restore_pci_state(tp);
9238 
9239 	tg3_flag_clear(tp, CHIP_RESETTING);
9240 	tg3_flag_clear(tp, ERROR_PROCESSED);
9241 
9242 	val = 0;
9243 	if (tg3_flag(tp, 5780_CLASS))
9244 		val = tr32(MEMARB_MODE);
9245 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9246 
9247 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9248 		tg3_stop_fw(tp);
9249 		tw32(0x5000, 0x400);
9250 	}
9251 
9252 	if (tg3_flag(tp, IS_SSB_CORE)) {
9253 		/*
9254 		 * BCM4785: To avoid repercussions from using the
9255 		 * potentially defective internal ROM, stop the Rx RISC
9256 		 * CPU, which is not required anyway.
9257 		 */
9258 		tg3_stop_fw(tp);
9259 		tg3_halt_cpu(tp, RX_CPU_BASE);
9260 	}
9261 
9262 	err = tg3_poll_fw(tp);
9263 	if (err)
9264 		return err;
9265 
9266 	tw32(GRC_MODE, tp->grc_mode);
9267 
9268 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9269 		val = tr32(0xc4);
9270 
9271 		tw32(0xc4, val | (1 << 15));
9272 	}
9273 
9274 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9275 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9276 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9277 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9278 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9279 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9280 	}
9281 
9282 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9283 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9284 		val = tp->mac_mode;
9285 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9286 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9287 		val = tp->mac_mode;
9288 	} else
9289 		val = 0;
9290 
9291 	tw32_f(MAC_MODE, val);
9292 	udelay(40);
9293 
9294 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9295 
9296 	tg3_mdio_start(tp);
9297 
9298 	if (tg3_flag(tp, PCI_EXPRESS) &&
9299 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9300 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9301 	    !tg3_flag(tp, 57765_PLUS)) {
9302 		val = tr32(0x7c00);
9303 
9304 		tw32(0x7c00, val | (1 << 25));
9305 	}
9306 
9307 	tg3_restore_clk(tp);
9308 
9309 	/* Increase the core clock speed to fix tx timeout issue for 5762
9310 	 * with 100Mbps link speed.
9311 	 */
9312 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9313 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9314 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9315 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9316 	}
9317 
9318 	/* Reprobe ASF enable state.  */
9319 	tg3_flag_clear(tp, ENABLE_ASF);
9320 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9321 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9322 
9323 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9324 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9325 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9326 		u32 nic_cfg;
9327 
9328 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9329 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9330 			tg3_flag_set(tp, ENABLE_ASF);
9331 			tp->last_event_jiffies = jiffies;
9332 			if (tg3_flag(tp, 5750_PLUS))
9333 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9334 
9335 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9336 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9337 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9338 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9339 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9340 		}
9341 	}
9342 
9343 	return 0;
9344 }
9345 
9346 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9347 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9348 static void __tg3_set_rx_mode(struct net_device *);
9349 
9350 /* tp->lock is held. */
9351 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9352 {
9353 	int err, i;
9354 
9355 	tg3_stop_fw(tp);
9356 
9357 	tg3_write_sig_pre_reset(tp, kind);
9358 
9359 	tg3_abort_hw(tp, silent);
9360 	err = tg3_chip_reset(tp);
9361 
9362 	__tg3_set_mac_addr(tp, false);
9363 
9364 	tg3_write_sig_legacy(tp, kind);
9365 	tg3_write_sig_post_reset(tp, kind);
9366 
9367 	if (tp->hw_stats) {
9368 		/* Save the stats across chip resets... */
9369 		tg3_get_nstats(tp, &tp->net_stats_prev);
9370 		tg3_get_estats(tp, &tp->estats_prev);
9371 
9372 		/* And make sure the next sample is new data */
9373 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9374 
9375 		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9376 			struct tg3_napi *tnapi = &tp->napi[i];
9377 
9378 			tnapi->rx_dropped = 0;
9379 			tnapi->tx_dropped = 0;
9380 		}
9381 	}
9382 
9383 	return err;
9384 }
9385 
9386 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9387 {
9388 	struct tg3 *tp = netdev_priv(dev);
9389 	struct sockaddr *addr = p;
9390 	int err = 0;
9391 	bool skip_mac_1 = false;
9392 
9393 	if (!is_valid_ether_addr(addr->sa_data))
9394 		return -EADDRNOTAVAIL;
9395 
9396 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9397 
9398 	if (!netif_running(dev))
9399 		return 0;
9400 
9401 	if (tg3_flag(tp, ENABLE_ASF)) {
9402 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9403 
9404 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9405 		addr0_low = tr32(MAC_ADDR_0_LOW);
9406 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9407 		addr1_low = tr32(MAC_ADDR_1_LOW);
9408 
9409 		/* Skip MAC addr 1 if ASF is using it. */
9410 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9411 		    !(addr1_high == 0 && addr1_low == 0))
9412 			skip_mac_1 = true;
9413 	}
9414 	spin_lock_bh(&tp->lock);
9415 	__tg3_set_mac_addr(tp, skip_mac_1);
9416 	__tg3_set_rx_mode(dev);
9417 	spin_unlock_bh(&tp->lock);
9418 
9419 	return err;
9420 }
9421 
9422 /* tp->lock is held. */
9423 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9424 			   dma_addr_t mapping, u32 maxlen_flags,
9425 			   u32 nic_addr)
9426 {
9427 	tg3_write_mem(tp,
9428 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9429 		      ((u64) mapping >> 32));
9430 	tg3_write_mem(tp,
9431 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9432 		      ((u64) mapping & 0xffffffff));
9433 	tg3_write_mem(tp,
9434 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9435 		       maxlen_flags);
9436 
9437 	if (!tg3_flag(tp, 5705_PLUS))
9438 		tg3_write_mem(tp,
9439 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9440 			      nic_addr);
9441 }
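
/* For example, the first send ring is programmed by tg3_tx_rcbs_init()
 * below roughly as:
 *
 *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 */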
9442 
9443 
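/* Per-vector host coalescing parameter blocks are laid out at a fixed
 * 0x18-byte stride, so for MSI-X vector n (n >= 1) the TX block starts
 * at, e.g.:
 *
 *	reg = HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 *
 * which is what the loops below and in tg3_coal_rx_init() rely on.
 */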
9444 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9445 {
9446 	int i = 0;
9447 
9448 	if (!tg3_flag(tp, ENABLE_TSS)) {
9449 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9450 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9451 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9452 	} else {
9453 		tw32(HOSTCC_TXCOL_TICKS, 0);
9454 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9455 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9456 
9457 		for (; i < tp->txq_cnt; i++) {
9458 			u32 reg;
9459 
9460 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9461 			tw32(reg, ec->tx_coalesce_usecs);
9462 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9463 			tw32(reg, ec->tx_max_coalesced_frames);
9464 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9465 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9466 		}
9467 	}
9468 
9469 	for (; i < tp->irq_max - 1; i++) {
9470 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9471 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9472 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9473 	}
9474 }
9475 
9476 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9477 {
9478 	int i = 0;
9479 	u32 limit = tp->rxq_cnt;
9480 
9481 	if (!tg3_flag(tp, ENABLE_RSS)) {
9482 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9483 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9484 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9485 		limit--;
9486 	} else {
9487 		tw32(HOSTCC_RXCOL_TICKS, 0);
9488 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9489 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9490 	}
9491 
9492 	for (; i < limit; i++) {
9493 		u32 reg;
9494 
9495 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9496 		tw32(reg, ec->rx_coalesce_usecs);
9497 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9498 		tw32(reg, ec->rx_max_coalesced_frames);
9499 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9500 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9501 	}
9502 
9503 	for (; i < tp->irq_max - 1; i++) {
9504 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9505 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9506 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9507 	}
9508 }
9509 
9510 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9511 {
9512 	tg3_coal_tx_init(tp, ec);
9513 	tg3_coal_rx_init(tp, ec);
9514 
9515 	if (!tg3_flag(tp, 5705_PLUS)) {
9516 		u32 val = ec->stats_block_coalesce_usecs;
9517 
9518 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9519 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9520 
9521 		if (!tp->link_up)
9522 			val = 0;
9523 
9524 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9525 	}
9526 }
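
/* __tg3_set_coalesce() is driven by the current ethtool parameters,
 * e.g. from tg3_reset_hw() below:
 *
 *	__tg3_set_coalesce(tp, &tp->coal);
 */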
9527 
9528 /* tp->lock is held. */
9529 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9530 {
9531 	u32 txrcb, limit;
9532 
9533 	/* Disable all transmit rings but the first. */
9534 	if (!tg3_flag(tp, 5705_PLUS))
9535 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9536 	else if (tg3_flag(tp, 5717_PLUS))
9537 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9538 	else if (tg3_flag(tp, 57765_CLASS) ||
9539 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9540 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9541 	else
9542 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9543 
9544 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9545 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9546 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9547 			      BDINFO_FLAGS_DISABLED);
9548 }
9549 
9550 /* tp->lock is held. */
9551 static void tg3_tx_rcbs_init(struct tg3 *tp)
9552 {
9553 	int i = 0;
9554 	u32 txrcb = NIC_SRAM_SEND_RCB;
9555 
9556 	if (tg3_flag(tp, ENABLE_TSS))
9557 		i++;
9558 
9559 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9560 		struct tg3_napi *tnapi = &tp->napi[i];
9561 
9562 		if (!tnapi->tx_ring)
9563 			continue;
9564 
9565 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9566 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9567 			       NIC_SRAM_TX_BUFFER_DESC);
9568 	}
9569 }
9570 
9571 /* tp->lock is held. */
9572 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9573 {
9574 	u32 rxrcb, limit;
9575 
9576 	/* Disable all receive return rings but the first. */
9577 	if (tg3_flag(tp, 5717_PLUS))
9578 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9579 	else if (!tg3_flag(tp, 5705_PLUS))
9580 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9581 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9582 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9583 		 tg3_flag(tp, 57765_CLASS))
9584 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9585 	else
9586 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9587 
9588 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9589 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9590 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9591 			      BDINFO_FLAGS_DISABLED);
9592 }
9593 
9594 /* tp->lock is held. */
9595 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9596 {
9597 	int i = 0;
9598 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9599 
9600 	if (tg3_flag(tp, ENABLE_RSS))
9601 		i++;
9602 
9603 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9604 		struct tg3_napi *tnapi = &tp->napi[i];
9605 
9606 		if (!tnapi->rx_rcb)
9607 			continue;
9608 
9609 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9610 			       (tp->rx_ret_ring_mask + 1) <<
9611 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9612 	}
9613 }
9614 
9615 /* tp->lock is held. */
9616 static void tg3_rings_reset(struct tg3 *tp)
9617 {
9618 	int i;
9619 	u32 stblk;
9620 	struct tg3_napi *tnapi = &tp->napi[0];
9621 
9622 	tg3_tx_rcbs_disable(tp);
9623 
9624 	tg3_rx_ret_rcbs_disable(tp);
9625 
9626 	/* Disable interrupts */
9627 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9628 	tp->napi[0].chk_msi_cnt = 0;
9629 	tp->napi[0].last_rx_cons = 0;
9630 	tp->napi[0].last_tx_cons = 0;
9631 
9632 	/* Zero mailbox registers. */
9633 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9634 		for (i = 1; i < tp->irq_max; i++) {
9635 			tp->napi[i].tx_prod = 0;
9636 			tp->napi[i].tx_cons = 0;
9637 			if (tg3_flag(tp, ENABLE_TSS))
9638 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9639 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9640 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9641 			tp->napi[i].chk_msi_cnt = 0;
9642 			tp->napi[i].last_rx_cons = 0;
9643 			tp->napi[i].last_tx_cons = 0;
9644 		}
9645 		if (!tg3_flag(tp, ENABLE_TSS))
9646 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9647 	} else {
9648 		tp->napi[0].tx_prod = 0;
9649 		tp->napi[0].tx_cons = 0;
9650 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9651 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9652 	}
9653 
9654 	/* Make sure the NIC-based send BD rings are disabled. */
9655 	if (!tg3_flag(tp, 5705_PLUS)) {
9656 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9657 		for (i = 0; i < 16; i++)
9658 			tw32_tx_mbox(mbox + i * 8, 0);
9659 	}
9660 
9661 	/* Clear status block in ram. */
9662 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9663 
9664 	/* Set status block DMA address */
9665 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9666 	     ((u64) tnapi->status_mapping >> 32));
9667 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9668 	     ((u64) tnapi->status_mapping & 0xffffffff));
9669 
9670 	stblk = HOSTCC_STATBLCK_RING1;
9671 
9672 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9673 		u64 mapping = (u64)tnapi->status_mapping;
9674 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9675 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9676 		stblk += 8;
9677 
9678 		/* Clear status block in ram. */
9679 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9680 	}
9681 
9682 	tg3_tx_rcbs_init(tp);
9683 	tg3_rx_ret_rcbs_init(tp);
9684 }
9685 
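/* The NIC replenishes its on-chip RX BD cache once it drains below a
 * threshold.  The threshold is chosen conservatively below, roughly:
 *
 *	val = min(bdcache_maxcnt / 2, max(rx_pending / 8, 1));
 *
 * with the NIC side additionally capped by tp->rx_std_max_post for the
 * standard ring.
 */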
9686 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9687 {
9688 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9689 
9690 	if (!tg3_flag(tp, 5750_PLUS) ||
9691 	    tg3_flag(tp, 5780_CLASS) ||
9692 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9693 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9694 	    tg3_flag(tp, 57765_PLUS))
9695 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9696 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9697 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9698 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9699 	else
9700 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9701 
9702 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9703 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9704 
9705 	val = min(nic_rep_thresh, host_rep_thresh);
9706 	tw32(RCVBDI_STD_THRESH, val);
9707 
9708 	if (tg3_flag(tp, 57765_PLUS))
9709 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9710 
9711 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9712 		return;
9713 
9714 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9715 
9716 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9717 
9718 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9719 	tw32(RCVBDI_JUMBO_THRESH, val);
9720 
9721 	if (tg3_flag(tp, 57765_PLUS))
9722 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9723 }
9724 
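/* calc_crc() computes the bit-reflected (little-endian) CRC-32 used by
 * the multicast hash filter.  It should be equivalent to the kernel's
 * ether_crc_le() from <linux/crc32.h>:
 *
 *	crc = calc_crc(ha->addr, ETH_ALEN);
 *	    == ether_crc_le(ETH_ALEN, ha->addr)
 */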
9725 static inline u32 calc_crc(unsigned char *buf, int len)
9726 {
9727 	u32 reg;
9728 	u32 tmp;
9729 	int j, k;
9730 
9731 	reg = 0xffffffff;
9732 
9733 	for (j = 0; j < len; j++) {
9734 		reg ^= buf[j];
9735 
9736 		for (k = 0; k < 8; k++) {
9737 			tmp = reg & 0x01;
9738 
9739 			reg >>= 1;
9740 
9741 			if (tmp)
9742 				reg ^= CRC32_POLY_LE;
9743 		}
9744 	}
9745 
9746 	return ~reg;
9747 }
9748 
9749 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9750 {
9751 	/* accept or reject all multicast frames */
9752 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9753 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9754 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9755 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9756 }
9757 
9758 static void __tg3_set_rx_mode(struct net_device *dev)
9759 {
9760 	struct tg3 *tp = netdev_priv(dev);
9761 	u32 rx_mode;
9762 
9763 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9764 				  RX_MODE_KEEP_VLAN_TAG);
9765 
9766 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9767 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9768 	 * flag clear.
9769 	 */
9770 	if (!tg3_flag(tp, ENABLE_ASF))
9771 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9772 #endif
9773 
9774 	if (dev->flags & IFF_PROMISC) {
9775 		/* Promiscuous mode. */
9776 		rx_mode |= RX_MODE_PROMISC;
9777 	} else if (dev->flags & IFF_ALLMULTI) {
9778 		/* Accept all multicast. */
9779 		tg3_set_multi(tp, 1);
9780 	} else if (netdev_mc_empty(dev)) {
9781 		/* Reject all multicast. */
9782 		tg3_set_multi(tp, 0);
9783 	} else {
9784 		/* Accept one or more multicast(s). */
9785 		struct netdev_hw_addr *ha;
9786 		u32 mc_filter[4] = { 0, };
9787 		u32 regidx;
9788 		u32 bit;
9789 		u32 crc;
9790 
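		/* The low 7 bits of the inverted CRC select one of 128
		 * hash filter bits: bits 6:5 pick one of the four
		 * MAC_HASH_REG_* registers, bits 4:0 the bit within it.
		 */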
9791 		netdev_for_each_mc_addr(ha, dev) {
9792 			crc = calc_crc(ha->addr, ETH_ALEN);
9793 			bit = ~crc & 0x7f;
9794 			regidx = (bit & 0x60) >> 5;
9795 			bit &= 0x1f;
9796 			mc_filter[regidx] |= (1 << bit);
9797 		}
9798 
9799 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9800 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9801 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9802 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9803 	}
9804 
9805 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9806 		rx_mode |= RX_MODE_PROMISC;
9807 	} else if (!(dev->flags & IFF_PROMISC)) {
9808 		/* Add all entries to the MAC address filter list */
9809 		int i = 0;
9810 		struct netdev_hw_addr *ha;
9811 
9812 		netdev_for_each_uc_addr(ha, dev) {
9813 			__tg3_set_one_mac_addr(tp, ha->addr,
9814 					       i + TG3_UCAST_ADDR_IDX(tp));
9815 			i++;
9816 		}
9817 	}
9818 
9819 	if (rx_mode != tp->rx_mode) {
9820 		tp->rx_mode = rx_mode;
9821 		tw32_f(MAC_RX_MODE, rx_mode);
9822 		udelay(10);
9823 	}
9824 }
9825 
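/* ethtool_rxfh_indir_default(i, qcnt) evaluates to i % qcnt, so the
 * default indirection table simply round-robins over the enabled RX
 * queues.
 */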
9826 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9827 {
9828 	int i;
9829 
9830 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9831 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9832 }
9833 
9834 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9835 {
9836 	int i;
9837 
9838 	if (!tg3_flag(tp, SUPPORT_MSIX))
9839 		return;
9840 
9841 	if (tp->rxq_cnt == 1) {
9842 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9843 		return;
9844 	}
9845 
9846 	/* Validate table against current IRQ count */
9847 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9848 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9849 			break;
9850 	}
9851 
9852 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9853 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9854 }
9855 
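/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit table
 * entries, first entry in the most significant nibble.  Assuming
 * TG3_RSS_INDIR_TBL_SIZE is a multiple of eight, the loop below
 * effectively performs:
 *
 *	val = tbl[i] << 28 | tbl[i + 1] << 24 | ... | tbl[i + 7];
 */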
9856 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9857 {
9858 	int i = 0;
9859 	u32 reg = MAC_RSS_INDIR_TBL_0;
9860 
9861 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9862 		u32 val = tp->rss_ind_tbl[i];
9863 		i++;
9864 		for (; i % 8; i++) {
9865 			val <<= 4;
9866 			val |= tp->rss_ind_tbl[i];
9867 		}
9868 		tw32(reg, val);
9869 		reg += 4;
9870 	}
9871 }
9872 
9873 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9874 {
9875 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9876 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9877 	else
9878 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9879 }
9880 
9881 /* tp->lock is held. */
9882 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9883 {
9884 	u32 val, rdmac_mode;
9885 	int i, err, limit;
9886 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9887 
9888 	tg3_disable_ints(tp);
9889 
9890 	tg3_stop_fw(tp);
9891 
9892 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9893 
9894 	if (tg3_flag(tp, INIT_COMPLETE))
9895 		tg3_abort_hw(tp, 1);
9896 
9897 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9898 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9899 		tg3_phy_pull_config(tp);
9900 		tg3_eee_pull_config(tp, NULL);
9901 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9902 	}
9903 
9904 	/* Enable MAC control of LPI */
9905 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9906 		tg3_setup_eee(tp);
9907 
9908 	if (reset_phy)
9909 		tg3_phy_reset(tp);
9910 
9911 	err = tg3_chip_reset(tp);
9912 	if (err)
9913 		return err;
9914 
9915 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9916 
9917 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9918 		val = tr32(TG3_CPMU_CTRL);
9919 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9920 		tw32(TG3_CPMU_CTRL, val);
9921 
9922 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9923 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9924 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9925 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9926 
9927 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9928 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9929 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9930 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9931 
9932 		val = tr32(TG3_CPMU_HST_ACC);
9933 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9934 		val |= CPMU_HST_ACC_MACCLK_6_25;
9935 		tw32(TG3_CPMU_HST_ACC, val);
9936 	}
9937 
9938 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9939 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9940 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9941 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9942 		tw32(PCIE_PWR_MGMT_THRESH, val);
9943 
9944 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9945 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9946 
9947 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9948 
9949 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9950 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9951 	}
9952 
9953 	if (tg3_flag(tp, L1PLLPD_EN)) {
9954 		u32 grc_mode = tr32(GRC_MODE);
9955 
9956 		/* Access the lower 1K of PL PCIE block registers. */
9957 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9958 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9959 
9960 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9961 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9962 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9963 
9964 		tw32(GRC_MODE, grc_mode);
9965 	}
9966 
9967 	if (tg3_flag(tp, 57765_CLASS)) {
9968 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9969 			u32 grc_mode = tr32(GRC_MODE);
9970 
9971 			/* Access the lower 1K of PL PCIE block registers. */
9972 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9973 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9974 
9975 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9976 				   TG3_PCIE_PL_LO_PHYCTL5);
9977 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9978 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9979 
9980 			tw32(GRC_MODE, grc_mode);
9981 		}
9982 
9983 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9984 			u32 grc_mode;
9985 
9986 			/* Fix transmit hangs */
9987 			val = tr32(TG3_CPMU_PADRNG_CTL);
9988 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9989 			tw32(TG3_CPMU_PADRNG_CTL, val);
9990 
9991 			grc_mode = tr32(GRC_MODE);
9992 
9993 			/* Access the lower 1K of DL PCIE block registers. */
9994 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9995 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9996 
9997 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9998 				   TG3_PCIE_DL_LO_FTSMAX);
9999 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10000 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10001 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10002 
10003 			tw32(GRC_MODE, grc_mode);
10004 		}
10005 
10006 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10007 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10008 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10009 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10010 	}
10011 
10012 	/* This works around an issue with Athlon chipsets on
10013 	 * B3 tigon3 silicon.  This bit has no effect on any
10014 	 * other revision.  But do not set this on PCI Express
10015 	 * chips and don't even touch the clocks if the CPMU is present.
10016 	 */
10017 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10018 		if (!tg3_flag(tp, PCI_EXPRESS))
10019 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10020 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10021 	}
10022 
10023 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10024 	    tg3_flag(tp, PCIX_MODE)) {
10025 		val = tr32(TG3PCI_PCISTATE);
10026 		val |= PCISTATE_RETRY_SAME_DMA;
10027 		tw32(TG3PCI_PCISTATE, val);
10028 	}
10029 
10030 	if (tg3_flag(tp, ENABLE_APE)) {
10031 		/* Allow reads and writes to the
10032 		 * APE register and memory space.
10033 		 */
10034 		val = tr32(TG3PCI_PCISTATE);
10035 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10036 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10037 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10038 		tw32(TG3PCI_PCISTATE, val);
10039 	}
10040 
10041 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10042 		/* Enable some hw fixes.  */
10043 		val = tr32(TG3PCI_MSI_DATA);
10044 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10045 		tw32(TG3PCI_MSI_DATA, val);
10046 	}
10047 
10048 	/* Descriptor ring init may make accesses to the
10049 	 * NIC SRAM area to set up the TX descriptors, so we
10050 	 * can only do this after the hardware has been
10051 	 * successfully reset.
10052 	 */
10053 	err = tg3_init_rings(tp);
10054 	if (err)
10055 		return err;
10056 
10057 	if (tg3_flag(tp, 57765_PLUS)) {
10058 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10059 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10060 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10061 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10062 		if (!tg3_flag(tp, 57765_CLASS) &&
10063 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10064 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10065 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10066 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10067 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10068 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10069 		/* This value is determined during the probe time DMA
10070 		 * engine test, tg3_test_dma.
10071 		 */
10072 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10073 	}
10074 
10075 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10076 			  GRC_MODE_4X_NIC_SEND_RINGS |
10077 			  GRC_MODE_NO_TX_PHDR_CSUM |
10078 			  GRC_MODE_NO_RX_PHDR_CSUM);
10079 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10080 
10081 	/* Pseudo-header checksum is done by hardware logic and not
10082 	 * the offload processors, so make the chip do the pseudo-
10083 	 * header checksums on receive.  For transmit it is more
10084 	 * convenient to do the pseudo-header checksum in software
10085 	 * as Linux does that on transmit for us in all cases.
10086 	 */
10087 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10088 
10089 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10090 	if (tp->rxptpctl)
10091 		tw32(TG3_RX_PTP_CTL,
10092 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10093 
10094 	if (tg3_flag(tp, PTP_CAPABLE))
10095 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10096 
10097 	tw32(GRC_MODE, tp->grc_mode | val);
10098 
10099 	/* On one AMD platform, MRRS is restricted to 4000 because of a
10100 	 * south bridge limitation. As a workaround, the driver sets MRRS
10101 	 * to 2048 instead of the default 4096.
10102 	 */
10103 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10104 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10105 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10106 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10107 	}
10108 
10109 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10110 	val = tr32(GRC_MISC_CFG);
10111 	val &= ~0xff;
10112 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10113 	tw32(GRC_MISC_CFG, val);
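	/* A prescaler value of 65 divides the 66 MHz clock by 65 + 1,
	 * presumably yielding a 1 MHz (1 usec resolution) timer tick.
	 */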
10114 
10115 	/* Initialize MBUF/DESC pool. */
10116 	if (tg3_flag(tp, 5750_PLUS)) {
10117 		/* Do nothing.  */
10118 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10119 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10120 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10121 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10122 		else
10123 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10124 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10125 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10126 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10127 		int fw_len;
10128 
10129 		fw_len = tp->fw_len;
10130 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10131 		tw32(BUFMGR_MB_POOL_ADDR,
10132 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10133 		tw32(BUFMGR_MB_POOL_SIZE,
10134 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10135 	}
10136 
10137 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10138 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10139 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10140 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10141 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10142 		tw32(BUFMGR_MB_HIGH_WATER,
10143 		     tp->bufmgr_config.mbuf_high_water);
10144 	} else {
10145 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10146 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10147 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10148 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10149 		tw32(BUFMGR_MB_HIGH_WATER,
10150 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10151 	}
10152 	tw32(BUFMGR_DMA_LOW_WATER,
10153 	     tp->bufmgr_config.dma_low_water);
10154 	tw32(BUFMGR_DMA_HIGH_WATER,
10155 	     tp->bufmgr_config.dma_high_water);
10156 
10157 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10158 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10159 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10160 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10161 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10162 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10163 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10164 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10165 	tw32(BUFMGR_MODE, val);
10166 	for (i = 0; i < 2000; i++) {
10167 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10168 			break;
10169 		udelay(10);
10170 	}
10171 	if (i >= 2000) {
10172 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10173 		return -ENODEV;
10174 	}
10175 
10176 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10177 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10178 
10179 	tg3_setup_rxbd_thresholds(tp);
10180 
10181 	/* Initialize TG3_BDINFO's at:
10182 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10183 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10184 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10185 	 *
10186 	 * like so:
10187 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10188 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10189 	 *                              ring attribute flags
10190 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10191 	 *
10192 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10193 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10194 	 *
10195 	 * The size of each ring is fixed in the firmware, but the location is
10196 	 * configurable.
10197 	 */
10198 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10199 	     ((u64) tpr->rx_std_mapping >> 32));
10200 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10201 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10202 	if (!tg3_flag(tp, 5717_PLUS))
10203 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10204 		     NIC_SRAM_RX_BUFFER_DESC);
10205 
10206 	/* Disable the mini ring */
10207 	if (!tg3_flag(tp, 5705_PLUS))
10208 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10209 		     BDINFO_FLAGS_DISABLED);
10210 
10211 	/* Program the jumbo buffer descriptor ring control
10212 	 * blocks on those devices that have them.
10213 	 */
10214 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10215 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10216 
10217 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10218 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10219 			     ((u64) tpr->rx_jmb_mapping >> 32));
10220 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10221 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10222 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10223 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10224 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10225 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10226 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10227 			    tg3_flag(tp, 57765_CLASS) ||
10228 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10229 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10230 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10231 		} else {
10232 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10233 			     BDINFO_FLAGS_DISABLED);
10234 		}
10235 
10236 		if (tg3_flag(tp, 57765_PLUS)) {
10237 			val = TG3_RX_STD_RING_SIZE(tp);
10238 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10239 			val |= (TG3_RX_STD_DMA_SZ << 2);
10240 		} else
10241 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10242 	} else
10243 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10244 
10245 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10246 
10247 	tpr->rx_std_prod_idx = tp->rx_pending;
10248 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10249 
10250 	tpr->rx_jmb_prod_idx =
10251 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10252 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10253 
10254 	tg3_rings_reset(tp);
10255 
10256 	/* Initialize MAC address and backoff seed. */
10257 	__tg3_set_mac_addr(tp, false);
10258 
10259 	/* MTU + ethernet header + FCS + optional VLAN tag */
10260 	tw32(MAC_RX_MTU_SIZE,
10261 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
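	/* e.g. with the default 1500-byte MTU this programs
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
	 */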
10262 
10263 	/* The slot time is changed by tg3_setup_phy if we
10264 	 * run at gigabit with half duplex.
10265 	 */
10266 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10267 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10268 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10269 
10270 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10271 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10272 		val |= tr32(MAC_TX_LENGTHS) &
10273 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10274 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10275 
10276 	tw32(MAC_TX_LENGTHS, val);
10277 
10278 	/* Receive rules. */
10279 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10280 	tw32(RCVLPC_CONFIG, 0x0181);
10281 
10282 	/* Calculate RDMAC_MODE setting early, we need it to determine
10283 	 * the RCVLPC_STATE_ENABLE mask.
10284 	 */
10285 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10286 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10287 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10288 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10289 		      RDMAC_MODE_LNGREAD_ENAB);
10290 
10291 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10292 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10293 
10294 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10295 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10296 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10297 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10298 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10299 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10300 
10301 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10302 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10303 		if (tg3_flag(tp, TSO_CAPABLE) &&
10304 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10305 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10306 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10307 			   !tg3_flag(tp, IS_5788)) {
10308 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10309 		}
10310 	}
10311 
10312 	if (tg3_flag(tp, PCI_EXPRESS))
10313 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10314 
10315 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10316 		tp->dma_limit = 0;
10317 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10318 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10319 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10320 		}
10321 	}
10322 
10323 	if (tg3_flag(tp, HW_TSO_1) ||
10324 	    tg3_flag(tp, HW_TSO_2) ||
10325 	    tg3_flag(tp, HW_TSO_3))
10326 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10327 
10328 	if (tg3_flag(tp, 57765_PLUS) ||
10329 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10330 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10331 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10332 
10333 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10334 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10335 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10336 
10337 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10338 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10339 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10340 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10341 	    tg3_flag(tp, 57765_PLUS)) {
10342 		u32 tgtreg;
10343 
10344 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10345 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10346 		else
10347 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10348 
10349 		val = tr32(tgtreg);
10350 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10351 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10352 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10353 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10354 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10355 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10356 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10357 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10358 		}
10359 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10360 	}
10361 
10362 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10363 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10364 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10365 		u32 tgtreg;
10366 
10367 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10368 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10369 		else
10370 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10371 
10372 		val = tr32(tgtreg);
10373 		tw32(tgtreg, val |
10374 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10375 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10376 	}
10377 
10378 	/* Receive/send statistics. */
10379 	if (tg3_flag(tp, 5750_PLUS)) {
10380 		val = tr32(RCVLPC_STATS_ENABLE);
10381 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10382 		tw32(RCVLPC_STATS_ENABLE, val);
10383 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10384 		   tg3_flag(tp, TSO_CAPABLE)) {
10385 		val = tr32(RCVLPC_STATS_ENABLE);
10386 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10387 		tw32(RCVLPC_STATS_ENABLE, val);
10388 	} else {
10389 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10390 	}
10391 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10392 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10393 	tw32(SNDDATAI_STATSCTRL,
10394 	     (SNDDATAI_SCTRL_ENABLE |
10395 	      SNDDATAI_SCTRL_FASTUPD));
10396 
10397 	/* Setup host coalescing engine. */
10398 	tw32(HOSTCC_MODE, 0);
10399 	for (i = 0; i < 2000; i++) {
10400 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10401 			break;
10402 		udelay(10);
10403 	}
10404 
10405 	__tg3_set_coalesce(tp, &tp->coal);
10406 
10407 	if (!tg3_flag(tp, 5705_PLUS)) {
10408 		/* Status/statistics block address.  See tg3_timer,
10409 		 * the tg3_periodic_fetch_stats call there, and
10410 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10411 		 */
10412 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10413 		     ((u64) tp->stats_mapping >> 32));
10414 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10415 		     ((u64) tp->stats_mapping & 0xffffffff));
10416 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10417 
10418 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10419 
10420 		/* Clear statistics and status block memory areas */
10421 		for (i = NIC_SRAM_STATS_BLK;
10422 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10423 		     i += sizeof(u32)) {
10424 			tg3_write_mem(tp, i, 0);
10425 			udelay(40);
10426 		}
10427 	}
10428 
10429 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10430 
10431 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10432 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10433 	if (!tg3_flag(tp, 5705_PLUS))
10434 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10435 
10436 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10437 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10438 		/* reset to prevent losing 1st rx packet intermittently */
10439 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10440 		udelay(10);
10441 	}
10442 
10443 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10444 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10445 			MAC_MODE_FHDE_ENABLE;
10446 	if (tg3_flag(tp, ENABLE_APE))
10447 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10448 	if (!tg3_flag(tp, 5705_PLUS) &&
10449 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10450 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10451 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10452 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10453 	udelay(40);
10454 
10455 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10456 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10457 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10458 	 * whether used as inputs or outputs, are set by boot code after
10459 	 * reset.
10460 	 */
10461 	if (!tg3_flag(tp, IS_NIC)) {
10462 		u32 gpio_mask;
10463 
10464 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10465 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10466 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10467 
10468 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10469 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10470 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10471 
10472 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10473 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10474 
10475 		tp->grc_local_ctrl &= ~gpio_mask;
10476 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10477 
10478 		/* GPIO1 must be driven high for eeprom write protect */
10479 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10480 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10481 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10482 	}
10483 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10484 	udelay(100);
10485 
10486 	if (tg3_flag(tp, USING_MSIX)) {
10487 		val = tr32(MSGINT_MODE);
10488 		val |= MSGINT_MODE_ENABLE;
10489 		if (tp->irq_cnt > 1)
10490 			val |= MSGINT_MODE_MULTIVEC_EN;
10491 		if (!tg3_flag(tp, 1SHOT_MSI))
10492 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10493 		tw32(MSGINT_MODE, val);
10494 	}
10495 
10496 	if (!tg3_flag(tp, 5705_PLUS)) {
10497 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10498 		udelay(40);
10499 	}
10500 
10501 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10502 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10503 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10504 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10505 	       WDMAC_MODE_LNGREAD_ENAB);
10506 
10507 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10508 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10509 		if (tg3_flag(tp, TSO_CAPABLE) &&
10510 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10511 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10512 			/* nothing */
10513 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10514 			   !tg3_flag(tp, IS_5788)) {
10515 			val |= WDMAC_MODE_RX_ACCEL;
10516 		}
10517 	}
10518 
10519 	/* Enable host coalescing bug fix */
10520 	if (tg3_flag(tp, 5755_PLUS))
10521 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10522 
10523 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10524 		val |= WDMAC_MODE_BURST_ALL_DATA;
10525 
10526 	tw32_f(WDMAC_MODE, val);
10527 	udelay(40);
10528 
10529 	if (tg3_flag(tp, PCIX_MODE)) {
10530 		u16 pcix_cmd;
10531 
10532 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10533 				     &pcix_cmd);
10534 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10535 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10536 			pcix_cmd |= PCI_X_CMD_READ_2K;
10537 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10538 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10539 			pcix_cmd |= PCI_X_CMD_READ_2K;
10540 		}
10541 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10542 				      pcix_cmd);
10543 	}
10544 
10545 	tw32_f(RDMAC_MODE, rdmac_mode);
10546 	udelay(40);
10547 
10548 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10549 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10550 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10551 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10552 				break;
10553 		}
10554 		if (i < TG3_NUM_RDMA_CHANNELS) {
10555 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10556 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10557 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10558 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10559 		}
10560 	}
10561 
10562 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10563 	if (!tg3_flag(tp, 5705_PLUS))
10564 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10565 
10566 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10567 		tw32(SNDDATAC_MODE,
10568 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10569 	else
10570 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10571 
10572 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10573 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10574 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10575 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10576 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10577 	tw32(RCVDBDI_MODE, val);
10578 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10579 	if (tg3_flag(tp, HW_TSO_1) ||
10580 	    tg3_flag(tp, HW_TSO_2) ||
10581 	    tg3_flag(tp, HW_TSO_3))
10582 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10583 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10584 	if (tg3_flag(tp, ENABLE_TSS))
10585 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10586 	tw32(SNDBDI_MODE, val);
10587 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10588 
10589 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10590 		err = tg3_load_5701_a0_firmware_fix(tp);
10591 		if (err)
10592 			return err;
10593 	}
10594 
10595 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10596 		/* Ignore any errors for the firmware download. If download
10597 		 * fails, the device will operate with EEE disabled.
10598 		 */
10599 		tg3_load_57766_firmware(tp);
10600 	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

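	/* Load the RSS indirection table and program the 40-byte hash key
	 * (ten 32-bit registers) with a randomly generated key.
	 */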
	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, only if the
			 * signal pre-emphasis bit is not set.
			 */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

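	/* Zero out the remaining receive rule slots.  5705-and-newer chips
	 * (other than the 5780 class) implement 8 rules, older chips 16;
	 * with ASF enabled the last four slots are left untouched,
	 * presumably reserved for the firmware's use.
	 */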
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

#ifdef CONFIG_TIGON3_HWMON
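/* Read the TG3_SD_NUM_RECS OCIR records out of the APE scratchpad,
 * zeroing any record whose signature or active flag is invalid.
 */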
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	u32 off, len = TG3_OCIR_LEN;
	int i;

	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, len);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
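	/* hwmon expects millidegrees Celsius; the value read from the APE
	 * appears to be in whole degrees, hence the conversion.
	 */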
	return sprintf(buf, "%u\n", temperature * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */

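/* Fold a 32-bit hardware counter into a 64-bit software accumulator,
 * propagating a carry into the high word whenever the low word wraps.
 */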
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

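/* Work around lost MSIs: if a vector has work pending but its consumer
 * indices have not advanced since the previous tick, assume the MSI was
 * missed and, after allowing one tick of grace, call the handler directly.
 */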
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds. */
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

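/* The timer normally fires every 100 ms; chips with tagged status (other
 * than the 5717 and the 57765 class) only need a once-per-second poll.
 * timer_multiplier converts the tick period back into the once-per-second
 * work done in tg3_timer().
 */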
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
	    tp->pdev->error_state != pci_channel_io_normal) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

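	/* The HOSTCC_MODE_NOW write above should force an immediate
	 * interrupt.  Poll for up to ~50 ms for evidence that it was
	 * delivered: a non-zero interrupt mailbox, or a masked PCI
	 * interrupt, either of which indicates the test ISR ran.
	 */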
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
 * successfully restored.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}

static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}

static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}

static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}

static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}

static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}

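/* Recombine the split 32-bit high/low halves of a hardware counter into
 * a single 64-bit value.
 */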
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}

static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
	int i;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Aggregate per-queue counters. The per-queue counters are updated
	 * by a single writer, race-free. The result computed by this loop
	 * might not be 100% accurate (counters can be updated in the middle of
	 * the loop) but the next tg3_get_nstats() will recompute the current
	 * value so it is acceptable.
	 *
	 * Note that these counters wrap around at 4G on 32bit machines.
	 */
	rx_dropped = (unsigned long)(old_stats->rx_dropped);
	tx_dropped = (unsigned long)(old_stats->tx_dropped);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		rx_dropped += tnapi->rx_dropped;
		tx_dropped += tnapi->tx_dropped;
	}

	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
}

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

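	/* If either end of the write is not 4-byte aligned, bounce the data
	 * through a temporary buffer seeded with the boundary words read
	 * above, so that whole NVRAM words are preserved.
	 */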
12165 	buf = data;
12166 	if (b_offset || odd_len) {
12167 		buf = kmalloc(len, GFP_KERNEL);
12168 		if (!buf)
12169 			return -ENOMEM;
12170 		if (b_offset)
12171 			memcpy(buf, &start, 4);
12172 		if (odd_len)
12173 			memcpy(buf+len-4, &end, 4);
12174 		memcpy(buf + b_offset, data, eeprom->len);
12175 	}
12176 
12177 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12178 
12179 	if (buf != data)
12180 		kfree(buf);
12181 
12182 	return ret;
12183 }
12184 
tg3_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)12185 static int tg3_get_link_ksettings(struct net_device *dev,
12186 				  struct ethtool_link_ksettings *cmd)
12187 {
12188 	struct tg3 *tp = netdev_priv(dev);
12189 	u32 supported, advertising;
12190 
12191 	if (tg3_flag(tp, USE_PHYLIB)) {
12192 		struct phy_device *phydev;
12193 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12194 			return -EAGAIN;
12195 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12196 		phy_ethtool_ksettings_get(phydev, cmd);
12197 
12198 		return 0;
12199 	}
12200 
12201 	supported = (SUPPORTED_Autoneg);
12202 
12203 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12204 		supported |= (SUPPORTED_1000baseT_Half |
12205 			      SUPPORTED_1000baseT_Full);
12206 
12207 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12208 		supported |= (SUPPORTED_100baseT_Half |
12209 			      SUPPORTED_100baseT_Full |
12210 			      SUPPORTED_10baseT_Half |
12211 			      SUPPORTED_10baseT_Full |
12212 			      SUPPORTED_TP);
12213 		cmd->base.port = PORT_TP;
12214 	} else {
12215 		supported |= SUPPORTED_FIBRE;
12216 		cmd->base.port = PORT_FIBRE;
12217 	}
12218 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12219 						supported);
12220 
12221 	advertising = tp->link_config.advertising;
12222 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12223 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12224 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12225 				advertising |= ADVERTISED_Pause;
12226 			} else {
12227 				advertising |= ADVERTISED_Pause |
12228 					ADVERTISED_Asym_Pause;
12229 			}
12230 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12231 			advertising |= ADVERTISED_Asym_Pause;
12232 		}
12233 	}
12234 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12235 						advertising);
12236 
12237 	if (netif_running(dev) && tp->link_up) {
12238 		cmd->base.speed = tp->link_config.active_speed;
12239 		cmd->base.duplex = tp->link_config.active_duplex;
12240 		ethtool_convert_legacy_u32_to_link_mode(
12241 			cmd->link_modes.lp_advertising,
12242 			tp->link_config.rmt_adv);
12243 
12244 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12245 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12246 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12247 			else
12248 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12249 		}
12250 	} else {
12251 		cmd->base.speed = SPEED_UNKNOWN;
12252 		cmd->base.duplex = DUPLEX_UNKNOWN;
12253 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12254 	}
12255 	cmd->base.phy_address = tp->phy_addr;
12256 	cmd->base.autoneg = tp->link_config.autoneg;
12257 	return 0;
12258 }
12259 
tg3_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)12260 static int tg3_set_link_ksettings(struct net_device *dev,
12261 				  const struct ethtool_link_ksettings *cmd)
12262 {
12263 	struct tg3 *tp = netdev_priv(dev);
12264 	u32 speed = cmd->base.speed;
12265 	u32 advertising;
12266 
12267 	if (tg3_flag(tp, USE_PHYLIB)) {
12268 		struct phy_device *phydev;
12269 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12270 			return -EAGAIN;
12271 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12272 		return phy_ethtool_ksettings_set(phydev, cmd);
12273 	}
12274 
12275 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12276 	    cmd->base.autoneg != AUTONEG_DISABLE)
12277 		return -EINVAL;
12278 
12279 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12280 	    cmd->base.duplex != DUPLEX_FULL &&
12281 	    cmd->base.duplex != DUPLEX_HALF)
12282 		return -EINVAL;
12283 
12284 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12285 						cmd->link_modes.advertising);
12286 
12287 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12288 		u32 mask = ADVERTISED_Autoneg |
12289 			   ADVERTISED_Pause |
12290 			   ADVERTISED_Asym_Pause;
12291 
12292 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12293 			mask |= ADVERTISED_1000baseT_Half |
12294 				ADVERTISED_1000baseT_Full;
12295 
12296 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12297 			mask |= ADVERTISED_100baseT_Half |
12298 				ADVERTISED_100baseT_Full |
12299 				ADVERTISED_10baseT_Half |
12300 				ADVERTISED_10baseT_Full |
12301 				ADVERTISED_TP;
12302 		else
12303 			mask |= ADVERTISED_FIBRE;
12304 
12305 		if (advertising & ~mask)
12306 			return -EINVAL;
12307 
12308 		mask &= (ADVERTISED_1000baseT_Half |
12309 			 ADVERTISED_1000baseT_Full |
12310 			 ADVERTISED_100baseT_Half |
12311 			 ADVERTISED_100baseT_Full |
12312 			 ADVERTISED_10baseT_Half |
12313 			 ADVERTISED_10baseT_Full);
12314 
12315 		advertising &= mask;
12316 	} else {
12317 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12318 			if (speed != SPEED_1000)
12319 				return -EINVAL;
12320 
12321 			if (cmd->base.duplex != DUPLEX_FULL)
12322 				return -EINVAL;
12323 		} else {
12324 			if (speed != SPEED_100 &&
12325 			    speed != SPEED_10)
12326 				return -EINVAL;
12327 		}
12328 	}
12329 
12330 	tg3_full_lock(tp, 0);
12331 
12332 	tp->link_config.autoneg = cmd->base.autoneg;
12333 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12334 		tp->link_config.advertising = (advertising |
12335 					      ADVERTISED_Autoneg);
12336 		tp->link_config.speed = SPEED_UNKNOWN;
12337 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12338 	} else {
12339 		tp->link_config.advertising = 0;
12340 		tp->link_config.speed = speed;
12341 		tp->link_config.duplex = cmd->base.duplex;
12342 	}
12343 
12344 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12345 
12346 	tg3_warn_mgmt_link_flap(tp);
12347 
12348 	if (netif_running(dev))
12349 		tg3_setup_phy(tp, true);
12350 
12351 	tg3_full_unlock(tp);
12352 
12353 	return 0;
12354 }
12355 
tg3_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)12356 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12357 {
12358 	struct tg3 *tp = netdev_priv(dev);
12359 
12360 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12361 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12362 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12363 }
12364 
tg3_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12365 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12366 {
12367 	struct tg3 *tp = netdev_priv(dev);
12368 
12369 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12370 		wol->supported = WAKE_MAGIC;
12371 	else
12372 		wol->supported = 0;
12373 	wol->wolopts = 0;
12374 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12375 		wol->wolopts = WAKE_MAGIC;
12376 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12377 }
12378 
tg3_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12379 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12380 {
12381 	struct tg3 *tp = netdev_priv(dev);
12382 	struct device *dp = &tp->pdev->dev;
12383 
12384 	if (wol->wolopts & ~WAKE_MAGIC)
12385 		return -EINVAL;
12386 	if ((wol->wolopts & WAKE_MAGIC) &&
12387 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12388 		return -EINVAL;
12389 
12390 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12391 
12392 	if (device_may_wakeup(dp))
12393 		tg3_flag_set(tp, WOL_ENABLE);
12394 	else
12395 		tg3_flag_clear(tp, WOL_ENABLE);
12396 
12397 	return 0;
12398 }
12399 
12400 static u32 tg3_get_msglevel(struct net_device *dev)
12401 {
12402 	struct tg3 *tp = netdev_priv(dev);
12403 	return tp->msg_enable;
12404 }
12405 
12406 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12407 {
12408 	struct tg3 *tp = netdev_priv(dev);
12409 	tp->msg_enable = value;
12410 }
12411 
12412 static int tg3_nway_reset(struct net_device *dev)
12413 {
12414 	struct tg3 *tp = netdev_priv(dev);
12415 	int r;
12416 
12417 	if (!netif_running(dev))
12418 		return -EAGAIN;
12419 
12420 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12421 		return -EINVAL;
12422 
12423 	tg3_warn_mgmt_link_flap(tp);
12424 
12425 	if (tg3_flag(tp, USE_PHYLIB)) {
12426 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12427 			return -EAGAIN;
12428 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12429 	} else {
12430 		u32 bmcr;
12431 
12432 		spin_lock_bh(&tp->lock);
12433 		r = -EINVAL;
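		/* BMCR is deliberately read twice here; only the second
		 * result is checked (the first read is discarded, presumably
		 * to flush a stale value from the PHY).
		 */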
12434 		tg3_readphy(tp, MII_BMCR, &bmcr);
12435 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12436 		    ((bmcr & BMCR_ANENABLE) ||
12437 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12438 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12439 						   BMCR_ANENABLE);
12440 			r = 0;
12441 		}
12442 		spin_unlock_bh(&tp->lock);
12443 	}
12444 
12445 	return r;
12446 }
12447 
12448 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12449 {
12450 	struct tg3 *tp = netdev_priv(dev);
12451 
12452 	ering->rx_max_pending = tp->rx_std_ring_mask;
12453 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12454 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12455 	else
12456 		ering->rx_jumbo_max_pending = 0;
12457 
12458 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12459 
12460 	ering->rx_pending = tp->rx_pending;
12461 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12462 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12463 	else
12464 		ering->rx_jumbo_pending = 0;
12465 
12466 	ering->tx_pending = tp->napi[0].tx_pending;
12467 }
12468 
12469 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12470 {
12471 	struct tg3 *tp = netdev_priv(dev);
12472 	int i, irq_sync = 0, err = 0;
12473 	bool reset_phy = false;
12474 
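	/* Validate against the hardware limits.  The TX ring must hold at
	 * least one maximally fragmented skb, and roughly three times that
	 * on TSO_BUG parts, where the TSO workaround can expand a packet.
	 */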
12475 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12476 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12477 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12478 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12479 	    (tg3_flag(tp, TSO_BUG) &&
12480 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12481 		return -EINVAL;
12482 
12483 	if (netif_running(dev)) {
12484 		tg3_phy_stop(tp);
12485 		tg3_netif_stop(tp);
12486 		irq_sync = 1;
12487 	}
12488 
12489 	tg3_full_lock(tp, irq_sync);
12490 
12491 	tp->rx_pending = ering->rx_pending;
12492 
12493 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12494 	    tp->rx_pending > 63)
12495 		tp->rx_pending = 63;
12496 
12497 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12498 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12499 
12500 	for (i = 0; i < tp->irq_max; i++)
12501 		tp->napi[i].tx_pending = ering->tx_pending;
12502 
12503 	if (netif_running(dev)) {
12504 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12505 		/* Reset PHY to avoid PHY lock up */
12506 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12507 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12508 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12509 			reset_phy = true;
12510 
12511 		err = tg3_restart_hw(tp, reset_phy);
12512 		if (!err)
12513 			tg3_netif_start(tp);
12514 	}
12515 
12516 	tg3_full_unlock(tp);
12517 
12518 	if (irq_sync && !err)
12519 		tg3_phy_start(tp);
12520 
12521 	return err;
12522 }
12523 
12524 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12525 {
12526 	struct tg3 *tp = netdev_priv(dev);
12527 
12528 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12529 
12530 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12531 		epause->rx_pause = 1;
12532 	else
12533 		epause->rx_pause = 0;
12534 
12535 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12536 		epause->tx_pause = 1;
12537 	else
12538 		epause->tx_pause = 0;
12539 }
12540 
12541 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12542 {
12543 	struct tg3 *tp = netdev_priv(dev);
12544 	int err = 0;
12545 	bool reset_phy = false;
12546 
12547 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12548 		tg3_warn_mgmt_link_flap(tp);
12549 
12550 	if (tg3_flag(tp, USE_PHYLIB)) {
12551 		struct phy_device *phydev;
12552 
12553 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12554 
12555 		if (!phy_validate_pause(phydev, epause))
12556 			return -EINVAL;
12557 
12558 		tp->link_config.flowctrl = 0;
12559 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12560 		if (epause->rx_pause) {
12561 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12562 
12563 			if (epause->tx_pause) {
12564 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12565 			}
12566 		} else if (epause->tx_pause) {
12567 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12568 		}
12569 
12570 		if (epause->autoneg)
12571 			tg3_flag_set(tp, PAUSE_AUTONEG);
12572 		else
12573 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12574 
12575 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12576 			if (phydev->autoneg) {
12577 				/* phy_set_asym_pause() will
12578 				 * renegotiate the link to inform our
12579 				 * link partner of our flow control
12580 				 * settings, even if the flow control
12581 				 * is forced.  Let tg3_adjust_link()
12582 				 * do the final flow control setup.
12583 				 */
12584 				return 0;
12585 			}
12586 
12587 			if (!epause->autoneg)
12588 				tg3_setup_flow_control(tp, 0, 0);
12589 		}
12590 	} else {
12591 		int irq_sync = 0;
12592 
12593 		if (netif_running(dev)) {
12594 			tg3_netif_stop(tp);
12595 			irq_sync = 1;
12596 		}
12597 
12598 		tg3_full_lock(tp, irq_sync);
12599 
12600 		if (epause->autoneg)
12601 			tg3_flag_set(tp, PAUSE_AUTONEG);
12602 		else
12603 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12604 		if (epause->rx_pause)
12605 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12606 		else
12607 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12608 		if (epause->tx_pause)
12609 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12610 		else
12611 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12612 
12613 		if (netif_running(dev)) {
12614 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12615 			/* Reset PHY to avoid PHY lock up */
12616 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12617 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12618 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12619 				reset_phy = true;
12620 
12621 			err = tg3_restart_hw(tp, reset_phy);
12622 			if (!err)
12623 				tg3_netif_start(tp);
12624 		}
12625 
12626 		tg3_full_unlock(tp);
12627 	}
12628 
12629 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12630 
12631 	return err;
12632 }
12633 
12634 static int tg3_get_sset_count(struct net_device *dev, int sset)
12635 {
12636 	switch (sset) {
12637 	case ETH_SS_TEST:
12638 		return TG3_NUM_TEST;
12639 	case ETH_SS_STATS:
12640 		return TG3_NUM_STATS;
12641 	default:
12642 		return -EOPNOTSUPP;
12643 	}
12644 }
12645 
12646 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12647 			 u32 *rules __always_unused)
12648 {
12649 	struct tg3 *tp = netdev_priv(dev);
12650 
12651 	if (!tg3_flag(tp, SUPPORT_MSIX))
12652 		return -EOPNOTSUPP;
12653 
12654 	switch (info->cmd) {
12655 	case ETHTOOL_GRXRINGS:
12656 		if (netif_running(tp->dev))
12657 			info->data = tp->rxq_cnt;
12658 		else {
12659 			info->data = num_online_cpus();
12660 			if (info->data > TG3_RSS_MAX_NUM_QS)
12661 				info->data = TG3_RSS_MAX_NUM_QS;
12662 		}
12663 
12664 		return 0;
12665 
12666 	default:
12667 		return -EOPNOTSUPP;
12668 	}
12669 }
12670 
12671 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12672 {
12673 	u32 size = 0;
12674 	struct tg3 *tp = netdev_priv(dev);
12675 
12676 	if (tg3_flag(tp, SUPPORT_MSIX))
12677 		size = TG3_RSS_INDIR_TBL_SIZE;
12678 
12679 	return size;
12680 }
12681 
12682 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12683 {
12684 	struct tg3 *tp = netdev_priv(dev);
12685 	int i;
12686 
12687 	if (hfunc)
12688 		*hfunc = ETH_RSS_HASH_TOP;
12689 	if (!indir)
12690 		return 0;
12691 
12692 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12693 		indir[i] = tp->rss_ind_tbl[i];
12694 
12695 	return 0;
12696 }
12697 
12698 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12699 			const u8 hfunc)
12700 {
12701 	struct tg3 *tp = netdev_priv(dev);
12702 	size_t i;
12703 
12704 	/* We require at least one supported parameter to be changed and no
12705 	 * change in any of the unsupported parameters
12706 	 */
12707 	if (key ||
12708 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12709 		return -EOPNOTSUPP;
12710 
12711 	if (!indir)
12712 		return 0;
12713 
12714 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12715 		tp->rss_ind_tbl[i] = indir[i];
12716 
12717 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12718 		return 0;
12719 
12720 	/* It is legal to write the indirection
12721 	 * table while the device is running.
12722 	 */
12723 	tg3_full_lock(tp, 0);
12724 	tg3_rss_write_indir_tbl(tp);
12725 	tg3_full_unlock(tp);
12726 
12727 	return 0;
12728 }
12729 
12730 static void tg3_get_channels(struct net_device *dev,
12731 			     struct ethtool_channels *channel)
12732 {
12733 	struct tg3 *tp = netdev_priv(dev);
12734 	u32 deflt_qs = netif_get_num_default_rss_queues();
12735 
12736 	channel->max_rx = tp->rxq_max;
12737 	channel->max_tx = tp->txq_max;
12738 
12739 	if (netif_running(dev)) {
12740 		channel->rx_count = tp->rxq_cnt;
12741 		channel->tx_count = tp->txq_cnt;
12742 	} else {
12743 		if (tp->rxq_req)
12744 			channel->rx_count = tp->rxq_req;
12745 		else
12746 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12747 
12748 		if (tp->txq_req)
12749 			channel->tx_count = tp->txq_req;
12750 		else
12751 			channel->tx_count = min(deflt_qs, tp->txq_max);
12752 	}
12753 }
12754 
12755 static int tg3_set_channels(struct net_device *dev,
12756 			    struct ethtool_channels *channel)
12757 {
12758 	struct tg3 *tp = netdev_priv(dev);
12759 
12760 	if (!tg3_flag(tp, SUPPORT_MSIX))
12761 		return -EOPNOTSUPP;
12762 
12763 	if (channel->rx_count > tp->rxq_max ||
12764 	    channel->tx_count > tp->txq_max)
12765 		return -EINVAL;
12766 
12767 	tp->rxq_req = channel->rx_count;
12768 	tp->txq_req = channel->tx_count;
12769 
12770 	if (!netif_running(dev))
12771 		return 0;
12772 
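	/* The new queue counts take effect via a full stop/start so that
	 * interrupt vectors and rings are reallocated.
	 */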
12773 	tg3_stop(tp);
12774 
12775 	tg3_carrier_off(tp);
12776 
12777 	tg3_start(tp, true, false, false);
12778 
12779 	return 0;
12780 }
12781 
12782 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12783 {
12784 	switch (stringset) {
12785 	case ETH_SS_STATS:
12786 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12787 		break;
12788 	case ETH_SS_TEST:
12789 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12790 		break;
12791 	default:
12792 		WARN_ON(1);	/* unknown string set */
12793 		break;
12794 	}
12795 }
12796 
12797 static int tg3_set_phys_id(struct net_device *dev,
12798 			    enum ethtool_phys_id_state state)
12799 {
12800 	struct tg3 *tp = netdev_priv(dev);
12801 
12802 	switch (state) {
12803 	case ETHTOOL_ID_ACTIVE:
12804 		return 1;	/* cycle on/off once per second */
12805 
12806 	case ETHTOOL_ID_ON:
12807 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12808 		     LED_CTRL_1000MBPS_ON |
12809 		     LED_CTRL_100MBPS_ON |
12810 		     LED_CTRL_10MBPS_ON |
12811 		     LED_CTRL_TRAFFIC_OVERRIDE |
12812 		     LED_CTRL_TRAFFIC_BLINK |
12813 		     LED_CTRL_TRAFFIC_LED);
12814 		break;
12815 
12816 	case ETHTOOL_ID_OFF:
12817 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12818 		     LED_CTRL_TRAFFIC_OVERRIDE);
12819 		break;
12820 
12821 	case ETHTOOL_ID_INACTIVE:
12822 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12823 		break;
12824 	}
12825 
12826 	return 0;
12827 }
12828 
12829 static void tg3_get_ethtool_stats(struct net_device *dev,
12830 				   struct ethtool_stats *estats, u64 *tmp_stats)
12831 {
12832 	struct tg3 *tp = netdev_priv(dev);
12833 
12834 	if (tp->hw_stats)
12835 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12836 	else
12837 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12838 }
12839 
12840 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12841 {
12842 	int i;
12843 	__be32 *buf;
12844 	u32 offset = 0, len = 0;
12845 	u32 magic, val;
12846 
12847 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12848 		return NULL;
12849 
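	/* For a standard EEPROM image, scan the NVRAM directory for an
	 * extended-VPD entry; if none is found, fall back to the fixed
	 * VPD block at TG3_NVM_VPD_OFF below.
	 */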
12850 	if (magic == TG3_EEPROM_MAGIC) {
12851 		for (offset = TG3_NVM_DIR_START;
12852 		     offset < TG3_NVM_DIR_END;
12853 		     offset += TG3_NVM_DIRENT_SIZE) {
12854 			if (tg3_nvram_read(tp, offset, &val))
12855 				return NULL;
12856 
12857 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12858 			    TG3_NVM_DIRTYPE_EXTVPD)
12859 				break;
12860 		}
12861 
12862 		if (offset != TG3_NVM_DIR_END) {
12863 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12864 			if (tg3_nvram_read(tp, offset + 4, &offset))
12865 				return NULL;
12866 
12867 			offset = tg3_nvram_logical_addr(tp, offset);
12868 		}
12869 	}
12870 
12871 	if (!offset || !len) {
12872 		offset = TG3_NVM_VPD_OFF;
12873 		len = TG3_NVM_VPD_LEN;
12874 	}
12875 
12876 	buf = kmalloc(len, GFP_KERNEL);
12877 	if (buf == NULL)
12878 		return NULL;
12879 
12880 	if (magic == TG3_EEPROM_MAGIC) {
12881 		for (i = 0; i < len; i += 4) {
12882 			/* The data is in little-endian format in NVRAM.
12883 			 * Use the big-endian read routines to preserve
12884 			 * the byte order as it exists in NVRAM.
12885 			 */
12886 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12887 				goto error;
12888 		}
12889 	} else {
12890 		u8 *ptr;
12891 		ssize_t cnt;
12892 		unsigned int pos = 0;
12893 
12894 		ptr = (u8 *)&buf[0];
12895 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12896 			cnt = pci_read_vpd(tp->pdev, pos,
12897 					   len - pos, ptr);
12898 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12899 				cnt = 0;
12900 			else if (cnt < 0)
12901 				goto error;
12902 		}
12903 		if (pos != len)
12904 			goto error;
12905 	}
12906 
12907 	*vpdlen = len;
12908 
12909 	return buf;
12910 
12911 error:
12912 	kfree(buf);
12913 	return NULL;
12914 }
12915 
12916 #define NVRAM_TEST_SIZE 0x100
12917 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12918 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12919 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12920 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12921 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12922 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12923 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12924 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12925 
12926 static int tg3_test_nvram(struct tg3 *tp)
12927 {
12928 	u32 csum, magic, len;
12929 	__be32 *buf;
12930 	int i, j, k, err = 0, size;
12931 
12932 	if (tg3_flag(tp, NO_NVRAM))
12933 		return 0;
12934 
12935 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12936 		return -EIO;
12937 
12938 	if (magic == TG3_EEPROM_MAGIC)
12939 		size = NVRAM_TEST_SIZE;
12940 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12941 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12942 		    TG3_EEPROM_SB_FORMAT_1) {
12943 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12944 			case TG3_EEPROM_SB_REVISION_0:
12945 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12946 				break;
12947 			case TG3_EEPROM_SB_REVISION_2:
12948 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12949 				break;
12950 			case TG3_EEPROM_SB_REVISION_3:
12951 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12952 				break;
12953 			case TG3_EEPROM_SB_REVISION_4:
12954 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12955 				break;
12956 			case TG3_EEPROM_SB_REVISION_5:
12957 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12958 				break;
12959 			case TG3_EEPROM_SB_REVISION_6:
12960 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12961 				break;
12962 			default:
12963 				return -EIO;
12964 			}
12965 		} else
12966 			return 0;
12967 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12968 		size = NVRAM_SELFBOOT_HW_SIZE;
12969 	else
12970 		return -EIO;
12971 
12972 	buf = kmalloc(size, GFP_KERNEL);
12973 	if (buf == NULL)
12974 		return -ENOMEM;
12975 
12976 	err = -EIO;
12977 	for (i = 0, j = 0; i < size; i += 4, j++) {
12978 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12979 		if (err)
12980 			break;
12981 	}
12982 	if (i < size)
12983 		goto out;
12984 
12985 	/* Selfboot format */
12986 	magic = be32_to_cpu(buf[0]);
12987 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12988 	    TG3_EEPROM_MAGIC_FW) {
12989 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12990 
12991 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12992 		    TG3_EEPROM_SB_REVISION_2) {
12993 			/* For rev 2, the csum doesn't include the MBA. */
12994 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12995 				csum8 += buf8[i];
12996 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12997 				csum8 += buf8[i];
12998 		} else {
12999 			for (i = 0; i < size; i++)
13000 				csum8 += buf8[i];
13001 		}
13002 
13003 		if (csum8 == 0) {
13004 			err = 0;
13005 			goto out;
13006 		}
13007 
13008 		err = -EIO;
13009 		goto out;
13010 	}
13011 
13012 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13013 	    TG3_EEPROM_MAGIC_HW) {
13014 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13015 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13016 		u8 *buf8 = (u8 *) buf;
13017 
13018 		/* Separate the parity bits and the data bytes.  */
13019 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13020 			if ((i == 0) || (i == 8)) {
13021 				int l;
13022 				u8 msk;
13023 
13024 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13025 					parity[k++] = buf8[i] & msk;
13026 				i++;
13027 			} else if (i == 16) {
13028 				int l;
13029 				u8 msk;
13030 
13031 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13032 					parity[k++] = buf8[i] & msk;
13033 				i++;
13034 
13035 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13036 					parity[k++] = buf8[i] & msk;
13037 				i++;
13038 			}
13039 			data[j++] = buf8[i];
13040 		}
13041 
13042 		err = -EIO;
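		/* Each data byte plus its parity bit must have odd parity:
		 * the stored bit is expected to be set exactly when the data
		 * byte contains an even number of ones.
		 */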
13043 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13044 			u8 hw8 = hweight8(data[i]);
13045 
13046 			if ((hw8 & 0x1) && parity[i])
13047 				goto out;
13048 			else if (!(hw8 & 0x1) && !parity[i])
13049 				goto out;
13050 		}
13051 		err = 0;
13052 		goto out;
13053 	}
13054 
13055 	err = -EIO;
13056 
13057 	/* Bootstrap checksum at offset 0x10 */
13058 	csum = calc_crc((unsigned char *) buf, 0x10);
13059 	if (csum != le32_to_cpu(buf[0x10/4]))
13060 		goto out;
13061 
13062 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13063 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13064 	if (csum != le32_to_cpu(buf[0xfc/4]))
13065 		goto out;
13066 
13067 	kfree(buf);
13068 
13069 	buf = tg3_vpd_readblock(tp, &len);
13070 	if (!buf)
13071 		return -ENOMEM;
13072 
13073 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13074 	if (i > 0) {
13075 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13076 		if (j < 0)
13077 			goto out;
13078 
13079 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13080 			goto out;
13081 
13082 		i += PCI_VPD_LRDT_TAG_SIZE;
13083 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13084 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13085 		if (j > 0) {
13086 			u8 csum8 = 0;
13087 
13088 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13089 
13090 			for (i = 0; i <= j; i++)
13091 				csum8 += ((u8 *)buf)[i];
13092 
13093 			if (csum8)
13094 				goto out;
13095 		}
13096 	}
13097 
13098 	err = 0;
13099 
13100 out:
13101 	kfree(buf);
13102 	return err;
13103 }
13104 
13105 #define TG3_SERDES_TIMEOUT_SEC	2
13106 #define TG3_COPPER_TIMEOUT_SEC	6
13107 
13108 static int tg3_test_link(struct tg3 *tp)
13109 {
13110 	int i, max;
13111 
13112 	if (!netif_running(tp->dev))
13113 		return -ENODEV;
13114 
13115 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13116 		max = TG3_SERDES_TIMEOUT_SEC;
13117 	else
13118 		max = TG3_COPPER_TIMEOUT_SEC;
13119 
13120 	for (i = 0; i < max; i++) {
13121 		if (tp->link_up)
13122 			return 0;
13123 
13124 		if (msleep_interruptible(1000))
13125 			break;
13126 	}
13127 
13128 	return -EIO;
13129 }
13130 
13131 /* Only test the commonly used registers */
13132 static int tg3_test_registers(struct tg3 *tp)
13133 {
13134 	int i, is_5705, is_5750;
13135 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13136 	static struct {
13137 		u16 offset;
13138 		u16 flags;
13139 #define TG3_FL_5705	0x1
13140 #define TG3_FL_NOT_5705	0x2
13141 #define TG3_FL_NOT_5788	0x4
13142 #define TG3_FL_NOT_5750	0x8
13143 		u32 read_mask;
13144 		u32 write_mask;
13145 	} reg_tbl[] = {
13146 		/* MAC Control Registers */
13147 		{ MAC_MODE, TG3_FL_NOT_5705,
13148 			0x00000000, 0x00ef6f8c },
13149 		{ MAC_MODE, TG3_FL_5705,
13150 			0x00000000, 0x01ef6b8c },
13151 		{ MAC_STATUS, TG3_FL_NOT_5705,
13152 			0x03800107, 0x00000000 },
13153 		{ MAC_STATUS, TG3_FL_5705,
13154 			0x03800100, 0x00000000 },
13155 		{ MAC_ADDR_0_HIGH, 0x0000,
13156 			0x00000000, 0x0000ffff },
13157 		{ MAC_ADDR_0_LOW, 0x0000,
13158 			0x00000000, 0xffffffff },
13159 		{ MAC_RX_MTU_SIZE, 0x0000,
13160 			0x00000000, 0x0000ffff },
13161 		{ MAC_TX_MODE, 0x0000,
13162 			0x00000000, 0x00000070 },
13163 		{ MAC_TX_LENGTHS, 0x0000,
13164 			0x00000000, 0x00003fff },
13165 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13166 			0x00000000, 0x000007fc },
13167 		{ MAC_RX_MODE, TG3_FL_5705,
13168 			0x00000000, 0x000007dc },
13169 		{ MAC_HASH_REG_0, 0x0000,
13170 			0x00000000, 0xffffffff },
13171 		{ MAC_HASH_REG_1, 0x0000,
13172 			0x00000000, 0xffffffff },
13173 		{ MAC_HASH_REG_2, 0x0000,
13174 			0x00000000, 0xffffffff },
13175 		{ MAC_HASH_REG_3, 0x0000,
13176 			0x00000000, 0xffffffff },
13177 
13178 		/* Receive Data and Receive BD Initiator Control Registers. */
13179 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13180 			0x00000000, 0xffffffff },
13181 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13182 			0x00000000, 0xffffffff },
13183 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13184 			0x00000000, 0x00000003 },
13185 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13186 			0x00000000, 0xffffffff },
13187 		{ RCVDBDI_STD_BD+0, 0x0000,
13188 			0x00000000, 0xffffffff },
13189 		{ RCVDBDI_STD_BD+4, 0x0000,
13190 			0x00000000, 0xffffffff },
13191 		{ RCVDBDI_STD_BD+8, 0x0000,
13192 			0x00000000, 0xffff0002 },
13193 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13194 			0x00000000, 0xffffffff },
13195 
13196 		/* Receive BD Initiator Control Registers. */
13197 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13198 			0x00000000, 0xffffffff },
13199 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13200 			0x00000000, 0x000003ff },
13201 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13202 			0x00000000, 0xffffffff },
13203 
13204 		/* Host Coalescing Control Registers. */
13205 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13206 			0x00000000, 0x00000004 },
13207 		{ HOSTCC_MODE, TG3_FL_5705,
13208 			0x00000000, 0x000000f6 },
13209 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13210 			0x00000000, 0xffffffff },
13211 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13212 			0x00000000, 0x000003ff },
13213 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13214 			0x00000000, 0xffffffff },
13215 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13216 			0x00000000, 0x000003ff },
13217 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13218 			0x00000000, 0xffffffff },
13219 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13220 			0x00000000, 0x000000ff },
13221 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13222 			0x00000000, 0xffffffff },
13223 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13224 			0x00000000, 0x000000ff },
13225 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13226 			0x00000000, 0xffffffff },
13227 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13228 			0x00000000, 0xffffffff },
13229 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13230 			0x00000000, 0xffffffff },
13231 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13232 			0x00000000, 0x000000ff },
13233 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13234 			0x00000000, 0xffffffff },
13235 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13236 			0x00000000, 0x000000ff },
13237 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13238 			0x00000000, 0xffffffff },
13239 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13240 			0x00000000, 0xffffffff },
13241 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13242 			0x00000000, 0xffffffff },
13243 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13244 			0x00000000, 0xffffffff },
13245 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13246 			0x00000000, 0xffffffff },
13247 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13248 			0xffffffff, 0x00000000 },
13249 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13250 			0xffffffff, 0x00000000 },
13251 
13252 		/* Buffer Manager Control Registers. */
13253 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13254 			0x00000000, 0x007fff80 },
13255 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13256 			0x00000000, 0x007fffff },
13257 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13258 			0x00000000, 0x0000003f },
13259 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13260 			0x00000000, 0x000001ff },
13261 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13262 			0x00000000, 0x000001ff },
13263 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13264 			0xffffffff, 0x00000000 },
13265 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13266 			0xffffffff, 0x00000000 },
13267 
13268 		/* Mailbox Registers */
13269 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13270 			0x00000000, 0x000001ff },
13271 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13272 			0x00000000, 0x000001ff },
13273 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13274 			0x00000000, 0x000007ff },
13275 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13276 			0x00000000, 0x000001ff },
13277 
13278 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13279 	};
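	/* For each entry, read_mask selects the read-only bits whose value
	 * must survive writes, and write_mask selects the read/write bits
	 * that must accept both all-zeros and all-ones.
	 */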
13280 
13281 	is_5705 = is_5750 = 0;
13282 	if (tg3_flag(tp, 5705_PLUS)) {
13283 		is_5705 = 1;
13284 		if (tg3_flag(tp, 5750_PLUS))
13285 			is_5750 = 1;
13286 	}
13287 
13288 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13289 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13290 			continue;
13291 
13292 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13293 			continue;
13294 
13295 		if (tg3_flag(tp, IS_5788) &&
13296 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13297 			continue;
13298 
13299 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13300 			continue;
13301 
13302 		offset = (u32) reg_tbl[i].offset;
13303 		read_mask = reg_tbl[i].read_mask;
13304 		write_mask = reg_tbl[i].write_mask;
13305 
13306 		/* Save the original register content */
13307 		save_val = tr32(offset);
13308 
13309 		/* Determine the read-only value. */
13310 		read_val = save_val & read_mask;
13311 
13312 		/* Write zero to the register, then make sure the read-only bits
13313 		 * are not changed and the read/write bits are all zeros.
13314 		 */
13315 		tw32(offset, 0);
13316 
13317 		val = tr32(offset);
13318 
13319 		/* Test the read-only and read/write bits. */
13320 		if (((val & read_mask) != read_val) || (val & write_mask))
13321 			goto out;
13322 
13323 		/* Write ones to all the bits defined by RdMask and WrMask, then
13324 		 * make sure the read-only bits are not changed and the
13325 		 * read/write bits are all ones.
13326 		 */
13327 		tw32(offset, read_mask | write_mask);
13328 
13329 		val = tr32(offset);
13330 
13331 		/* Test the read-only bits. */
13332 		if ((val & read_mask) != read_val)
13333 			goto out;
13334 
13335 		/* Test the read/write bits. */
13336 		if ((val & write_mask) != write_mask)
13337 			goto out;
13338 
13339 		tw32(offset, save_val);
13340 	}
13341 
13342 	return 0;
13343 
13344 out:
13345 	if (netif_msg_hw(tp))
13346 		netdev_err(tp->dev,
13347 			   "Register test failed at offset %x\n", offset);
13348 	tw32(offset, save_val);
13349 	return -EIO;
13350 }
13351 
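/* Write each test pattern to every 32-bit word in [offset, offset + len)
 * and verify that it reads back intact.
 */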
13352 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13353 {
13354 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13355 	int i;
13356 	u32 j;
13357 
13358 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13359 		for (j = 0; j < len; j += 4) {
13360 			u32 val;
13361 
13362 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13363 			tg3_read_mem(tp, offset + j, &val);
13364 			if (val != test_pattern[i])
13365 				return -EIO;
13366 		}
13367 	}
13368 	return 0;
13369 }
13370 
13371 static int tg3_test_memory(struct tg3 *tp)
13372 {
13373 	static struct mem_entry {
13374 		u32 offset;
13375 		u32 len;
13376 	} mem_tbl_570x[] = {
13377 		{ 0x00000000, 0x00b50},
13378 		{ 0x00002000, 0x1c000},
13379 		{ 0xffffffff, 0x00000}
13380 	}, mem_tbl_5705[] = {
13381 		{ 0x00000100, 0x0000c},
13382 		{ 0x00000200, 0x00008},
13383 		{ 0x00004000, 0x00800},
13384 		{ 0x00006000, 0x01000},
13385 		{ 0x00008000, 0x02000},
13386 		{ 0x00010000, 0x0e000},
13387 		{ 0xffffffff, 0x00000}
13388 	}, mem_tbl_5755[] = {
13389 		{ 0x00000200, 0x00008},
13390 		{ 0x00004000, 0x00800},
13391 		{ 0x00006000, 0x00800},
13392 		{ 0x00008000, 0x02000},
13393 		{ 0x00010000, 0x0c000},
13394 		{ 0xffffffff, 0x00000}
13395 	}, mem_tbl_5906[] = {
13396 		{ 0x00000200, 0x00008},
13397 		{ 0x00004000, 0x00400},
13398 		{ 0x00006000, 0x00400},
13399 		{ 0x00008000, 0x01000},
13400 		{ 0x00010000, 0x01000},
13401 		{ 0xffffffff, 0x00000}
13402 	}, mem_tbl_5717[] = {
13403 		{ 0x00000200, 0x00008},
13404 		{ 0x00010000, 0x0a000},
13405 		{ 0x00020000, 0x13c00},
13406 		{ 0xffffffff, 0x00000}
13407 	}, mem_tbl_57765[] = {
13408 		{ 0x00000200, 0x00008},
13409 		{ 0x00004000, 0x00800},
13410 		{ 0x00006000, 0x09800},
13411 		{ 0x00010000, 0x0a000},
13412 		{ 0xffffffff, 0x00000}
13413 	};
13414 	struct mem_entry *mem_tbl;
13415 	int err = 0;
13416 	int i;
13417 
13418 	if (tg3_flag(tp, 5717_PLUS))
13419 		mem_tbl = mem_tbl_5717;
13420 	else if (tg3_flag(tp, 57765_CLASS) ||
13421 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13422 		mem_tbl = mem_tbl_57765;
13423 	else if (tg3_flag(tp, 5755_PLUS))
13424 		mem_tbl = mem_tbl_5755;
13425 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13426 		mem_tbl = mem_tbl_5906;
13427 	else if (tg3_flag(tp, 5705_PLUS))
13428 		mem_tbl = mem_tbl_5705;
13429 	else
13430 		mem_tbl = mem_tbl_570x;
13431 
13432 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13433 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13434 		if (err)
13435 			break;
13436 	}
13437 
13438 	return err;
13439 }
13440 
13441 #define TG3_TSO_MSS		500
13442 
13443 #define TG3_TSO_IP_HDR_LEN	20
13444 #define TG3_TSO_TCP_HDR_LEN	20
13445 #define TG3_TSO_TCP_OPT_LEN	12
13446 
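/* Canned Ethernet type + IPv4 + TCP header (with timestamp option) used to
 * build the TSO loopback frame.  The IP total-length field is patched in
 * at run time.
 */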
13447 static const u8 tg3_tso_header[] = {
13448 0x08, 0x00,
13449 0x45, 0x00, 0x00, 0x00,
13450 0x00, 0x00, 0x40, 0x00,
13451 0x40, 0x06, 0x00, 0x00,
13452 0x0a, 0x00, 0x00, 0x01,
13453 0x0a, 0x00, 0x00, 0x02,
13454 0x0d, 0x00, 0xe0, 0x00,
13455 0x00, 0x00, 0x01, 0x00,
13456 0x00, 0x00, 0x02, 0x00,
13457 0x80, 0x10, 0x10, 0x00,
13458 0x14, 0x09, 0x00, 0x00,
13459 0x01, 0x01, 0x08, 0x0a,
13460 0x11, 0x11, 0x11, 0x11,
13461 0x11, 0x11, 0x11, 0x11,
13462 };
13463 
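/* Transmit a single test frame (or a TSO burst) on the TX ring, poll the
 * status block until it appears on the RX ring, then verify the descriptor
 * flags and the payload byte pattern.
 */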
13464 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13465 {
13466 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13467 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13468 	u32 budget;
13469 	struct sk_buff *skb;
13470 	u8 *tx_data, *rx_data;
13471 	dma_addr_t map;
13472 	int num_pkts, tx_len, rx_len, i, err;
13473 	struct tg3_rx_buffer_desc *desc;
13474 	struct tg3_napi *tnapi, *rnapi;
13475 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13476 
13477 	tnapi = &tp->napi[0];
13478 	rnapi = &tp->napi[0];
13479 	if (tp->irq_cnt > 1) {
13480 		if (tg3_flag(tp, ENABLE_RSS))
13481 			rnapi = &tp->napi[1];
13482 		if (tg3_flag(tp, ENABLE_TSS))
13483 			tnapi = &tp->napi[1];
13484 	}
13485 	coal_now = tnapi->coal_now | rnapi->coal_now;
13486 
13487 	err = -EIO;
13488 
13489 	tx_len = pktsz;
13490 	skb = netdev_alloc_skb(tp->dev, tx_len);
13491 	if (!skb)
13492 		return -ENOMEM;
13493 
13494 	tx_data = skb_put(skb, tx_len);
13495 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13496 	memset(tx_data + ETH_ALEN, 0x0, 8);
13497 
13498 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13499 
13500 	if (tso_loopback) {
13501 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13502 
13503 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13504 			      TG3_TSO_TCP_OPT_LEN;
13505 
13506 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13507 		       sizeof(tg3_tso_header));
13508 		mss = TG3_TSO_MSS;
13509 
13510 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13511 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13512 
13513 		/* Set the total length field in the IP header */
13514 		iph->tot_len = htons((u16)(mss + hdr_len));
13515 
13516 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13517 			      TXD_FLAG_CPU_POST_DMA);
13518 
13519 		if (tg3_flag(tp, HW_TSO_1) ||
13520 		    tg3_flag(tp, HW_TSO_2) ||
13521 		    tg3_flag(tp, HW_TSO_3)) {
13522 			struct tcphdr *th;
13523 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13524 			th = (struct tcphdr *)&tx_data[val];
13525 			th->check = 0;
13526 		} else
13527 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13528 
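		/* Encode the header length for the hardware: HW_TSO_3 splits
		 * it between the mss field and base_flags, HW_TSO_2 packs it
		 * into the upper mss bits, and older parts encode only the
		 * TCP option length.
		 */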
13529 		if (tg3_flag(tp, HW_TSO_3)) {
13530 			mss |= (hdr_len & 0xc) << 12;
13531 			if (hdr_len & 0x10)
13532 				base_flags |= 0x00000010;
13533 			base_flags |= (hdr_len & 0x3e0) << 5;
13534 		} else if (tg3_flag(tp, HW_TSO_2))
13535 			mss |= hdr_len << 9;
13536 		else if (tg3_flag(tp, HW_TSO_1) ||
13537 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13538 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13539 		} else {
13540 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13541 		}
13542 
13543 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13544 	} else {
13545 		num_pkts = 1;
13546 		data_off = ETH_HLEN;
13547 
13548 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13549 		    tx_len > VLAN_ETH_FRAME_LEN)
13550 			base_flags |= TXD_FLAG_JMB_PKT;
13551 	}
13552 
13553 	for (i = data_off; i < tx_len; i++)
13554 		tx_data[i] = (u8) (i & 0xff);
13555 
13556 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13557 	if (pci_dma_mapping_error(tp->pdev, map)) {
13558 		dev_kfree_skb(skb);
13559 		return -EIO;
13560 	}
13561 
13562 	val = tnapi->tx_prod;
13563 	tnapi->tx_buffers[val].skb = skb;
13564 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13565 
13566 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13567 	       rnapi->coal_now);
13568 
13569 	udelay(10);
13570 
13571 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13572 
13573 	budget = tg3_tx_avail(tnapi);
13574 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13575 			    base_flags | TXD_FLAG_END, mss, 0)) {
13576 		tnapi->tx_buffers[val].skb = NULL;
13577 		dev_kfree_skb(skb);
13578 		return -EIO;
13579 	}
13580 
13581 	tnapi->tx_prod++;
13582 
13583 	/* Sync BD data before updating mailbox */
13584 	wmb();
13585 
13586 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13587 	tr32_mailbox(tnapi->prodmbox);
13588 
13589 	udelay(10);
13590 
13591 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13592 	for (i = 0; i < 35; i++) {
13593 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13594 		       coal_now);
13595 
13596 		udelay(10);
13597 
13598 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13599 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13600 		if ((tx_idx == tnapi->tx_prod) &&
13601 		    (rx_idx == (rx_start_idx + num_pkts)))
13602 			break;
13603 	}
13604 
13605 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13606 	dev_kfree_skb(skb);
13607 
13608 	if (tx_idx != tnapi->tx_prod)
13609 		goto out;
13610 
13611 	if (rx_idx != rx_start_idx + num_pkts)
13612 		goto out;
13613 
13614 	val = data_off;
13615 	while (rx_idx != rx_start_idx) {
13616 		desc = &rnapi->rx_rcb[rx_start_idx++];
13617 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13618 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13619 
13620 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13621 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13622 			goto out;
13623 
13624 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13625 			 - ETH_FCS_LEN;
13626 
13627 		if (!tso_loopback) {
13628 			if (rx_len != tx_len)
13629 				goto out;
13630 
13631 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13632 				if (opaque_key != RXD_OPAQUE_RING_STD)
13633 					goto out;
13634 			} else {
13635 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13636 					goto out;
13637 			}
13638 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13639 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13640 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13641 			goto out;
13642 		}
13643 
13644 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13645 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13646 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13647 					     mapping);
13648 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13649 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13650 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13651 					     mapping);
13652 		} else
13653 			goto out;
13654 
13655 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13656 					    PCI_DMA_FROMDEVICE);
13657 
13658 		rx_data += TG3_RX_OFFSET(tp);
13659 		for (i = data_off; i < rx_len; i++, val++) {
13660 			if (*(rx_data + i) != (u8) (val & 0xff))
13661 				goto out;
13662 		}
13663 	}
13664 
13665 	err = 0;
13666 
13667 	/* tg3_free_rings will unmap and free the rx_data */
13668 out:
13669 	return err;
13670 }
13671 
13672 #define TG3_STD_LOOPBACK_FAILED		1
13673 #define TG3_JMB_LOOPBACK_FAILED		2
13674 #define TG3_TSO_LOOPBACK_FAILED		4
13675 #define TG3_LOOPBACK_FAILED \
13676 	(TG3_STD_LOOPBACK_FAILED | \
13677 	 TG3_JMB_LOOPBACK_FAILED | \
13678 	 TG3_TSO_LOOPBACK_FAILED)
13679 
13680 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13681 {
13682 	int err = -EIO;
13683 	u32 eee_cap;
13684 	u32 jmb_pkt_sz = 9000;
13685 
13686 	if (tp->dma_limit)
13687 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13688 
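	/* Mask off the EEE capability while the tests run so that low-power
	 * idle signalling cannot interfere with the loopbacks; the flag is
	 * restored at "done" below.
	 */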
13689 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13690 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13691 
13692 	if (!netif_running(tp->dev)) {
13693 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13694 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13695 		if (do_extlpbk)
13696 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13697 		goto done;
13698 	}
13699 
13700 	err = tg3_reset_hw(tp, true);
13701 	if (err) {
13702 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13703 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13704 		if (do_extlpbk)
13705 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13706 		goto done;
13707 	}
13708 
13709 	if (tg3_flag(tp, ENABLE_RSS)) {
13710 		int i;
13711 
13712 		/* Reroute all rx packets to the 1st queue */
13713 		for (i = MAC_RSS_INDIR_TBL_0;
13714 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13715 			tw32(i, 0x0);
13716 	}
13717 
13718 	/* HW errata - mac loopback fails in some cases on 5780.
13719 	 * Normal traffic and PHY loopback are not affected by
13720 	 * errata.  Also, the MAC loopback test is deprecated for
13721 	 * all newer ASIC revisions.
13722 	 */
13723 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13724 	    !tg3_flag(tp, CPMU_PRESENT)) {
13725 		tg3_mac_loopback(tp, true);
13726 
13727 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13728 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13729 
13730 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13731 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13732 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13733 
13734 		tg3_mac_loopback(tp, false);
13735 	}
13736 
13737 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13738 	    !tg3_flag(tp, USE_PHYLIB)) {
13739 		int i;
13740 
13741 		tg3_phy_lpbk_set(tp, 0, false);
13742 
13743 		/* Wait for link */
13744 		for (i = 0; i < 100; i++) {
13745 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13746 				break;
13747 			mdelay(1);
13748 		}
13749 
13750 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13751 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13752 		if (tg3_flag(tp, TSO_CAPABLE) &&
13753 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13754 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13755 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13756 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13757 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13758 
13759 		if (do_extlpbk) {
13760 			tg3_phy_lpbk_set(tp, 0, true);
13761 
13762 			/* All link indications report up, but the hardware
13763 			 * isn't really ready for about 20 msec.  Double it
13764 			 * to be sure.
13765 			 */
13766 			mdelay(40);
13767 
13768 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13769 				data[TG3_EXT_LOOPB_TEST] |=
13770 							TG3_STD_LOOPBACK_FAILED;
13771 			if (tg3_flag(tp, TSO_CAPABLE) &&
13772 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13773 				data[TG3_EXT_LOOPB_TEST] |=
13774 							TG3_TSO_LOOPBACK_FAILED;
13775 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13776 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13777 				data[TG3_EXT_LOOPB_TEST] |=
13778 							TG3_JMB_LOOPBACK_FAILED;
13779 		}
13780 
13781 		/* Re-enable gphy autopowerdown. */
13782 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13783 			tg3_phy_toggle_apd(tp, true);
13784 	}
13785 
13786 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13787 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13788 
13789 done:
13790 	tp->phy_flags |= eee_cap;
13791 
13792 	return err;
13793 }
13794 
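/* ethtool self-test entry point.  Runs the NVRAM test, the link test
 * (unless an external loopback was requested) and, for offline tests,
 * halts the chip to run the register, memory, loopback and interrupt
 * tests before restarting it.
 */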
13795 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13796 			  u64 *data)
13797 {
13798 	struct tg3 *tp = netdev_priv(dev);
13799 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13800 
13801 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13802 		if (tg3_power_up(tp)) {
13803 			etest->flags |= ETH_TEST_FL_FAILED;
13804 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13805 			return;
13806 		}
13807 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13808 	}
13809 
13810 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13811 
13812 	if (tg3_test_nvram(tp) != 0) {
13813 		etest->flags |= ETH_TEST_FL_FAILED;
13814 		data[TG3_NVRAM_TEST] = 1;
13815 	}
13816 	if (!doextlpbk && tg3_test_link(tp)) {
13817 		etest->flags |= ETH_TEST_FL_FAILED;
13818 		data[TG3_LINK_TEST] = 1;
13819 	}
13820 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13821 		int err, err2 = 0, irq_sync = 0;
13822 
13823 		if (netif_running(dev)) {
13824 			tg3_phy_stop(tp);
13825 			tg3_netif_stop(tp);
13826 			irq_sync = 1;
13827 		}
13828 
13829 		tg3_full_lock(tp, irq_sync);
13830 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13831 		err = tg3_nvram_lock(tp);
13832 		tg3_halt_cpu(tp, RX_CPU_BASE);
13833 		if (!tg3_flag(tp, 5705_PLUS))
13834 			tg3_halt_cpu(tp, TX_CPU_BASE);
13835 		if (!err)
13836 			tg3_nvram_unlock(tp);
13837 
13838 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13839 			tg3_phy_reset(tp);
13840 
13841 		if (tg3_test_registers(tp) != 0) {
13842 			etest->flags |= ETH_TEST_FL_FAILED;
13843 			data[TG3_REGISTER_TEST] = 1;
13844 		}
13845 
13846 		if (tg3_test_memory(tp) != 0) {
13847 			etest->flags |= ETH_TEST_FL_FAILED;
13848 			data[TG3_MEMORY_TEST] = 1;
13849 		}
13850 
13851 		if (doextlpbk)
13852 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13853 
13854 		if (tg3_test_loopback(tp, data, doextlpbk))
13855 			etest->flags |= ETH_TEST_FL_FAILED;
13856 
13857 		tg3_full_unlock(tp);
13858 
13859 		if (tg3_test_interrupt(tp) != 0) {
13860 			etest->flags |= ETH_TEST_FL_FAILED;
13861 			data[TG3_INTERRUPT_TEST] = 1;
13862 		}
13863 
13864 		tg3_full_lock(tp, 0);
13865 
13866 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13867 		if (netif_running(dev)) {
13868 			tg3_flag_set(tp, INIT_COMPLETE);
13869 			err2 = tg3_restart_hw(tp, true);
13870 			if (!err2)
13871 				tg3_netif_start(tp);
13872 		}
13873 
13874 		tg3_full_unlock(tp);
13875 
13876 		if (irq_sync && !err2)
13877 			tg3_phy_start(tp);
13878 	}
13879 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13880 		tg3_power_down_prepare(tp);
13881 
13882 }
13883 
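/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config into the
 * device's RX PTP filter control bits and the TX timestamp enable flag.
 */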
13884 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13885 {
13886 	struct tg3 *tp = netdev_priv(dev);
13887 	struct hwtstamp_config stmpconf;
13888 
13889 	if (!tg3_flag(tp, PTP_CAPABLE))
13890 		return -EOPNOTSUPP;
13891 
13892 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13893 		return -EFAULT;
13894 
13895 	if (stmpconf.flags)
13896 		return -EINVAL;
13897 
13898 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13899 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13900 		return -ERANGE;
13901 
13902 	switch (stmpconf.rx_filter) {
13903 	case HWTSTAMP_FILTER_NONE:
13904 		tp->rxptpctl = 0;
13905 		break;
13906 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13907 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13908 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13909 		break;
13910 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13911 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13912 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13913 		break;
13914 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13915 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13916 			       TG3_RX_PTP_CTL_DELAY_REQ;
13917 		break;
13918 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13919 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13920 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13921 		break;
13922 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13923 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13924 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13925 		break;
13926 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13927 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13928 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13929 		break;
13930 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13931 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13932 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13933 		break;
13934 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13935 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13936 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13937 		break;
13938 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13939 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13940 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13941 		break;
13942 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13943 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13944 			       TG3_RX_PTP_CTL_DELAY_REQ;
13945 		break;
13946 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13947 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13948 			       TG3_RX_PTP_CTL_DELAY_REQ;
13949 		break;
13950 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13951 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13952 			       TG3_RX_PTP_CTL_DELAY_REQ;
13953 		break;
13954 	default:
13955 		return -ERANGE;
13956 	}
13957 
13958 	if (netif_running(dev) && tp->rxptpctl)
13959 		tw32(TG3_RX_PTP_CTL,
13960 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13961 
13962 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13963 		tg3_flag_set(tp, TX_TSTAMP_EN);
13964 	else
13965 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13966 
13967 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13968 		-EFAULT : 0;
13969 }
13970 
13971 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13972 {
13973 	struct tg3 *tp = netdev_priv(dev);
13974 	struct hwtstamp_config stmpconf;
13975 
13976 	if (!tg3_flag(tp, PTP_CAPABLE))
13977 		return -EOPNOTSUPP;
13978 
13979 	stmpconf.flags = 0;
13980 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13981 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13982 
13983 	switch (tp->rxptpctl) {
13984 	case 0:
13985 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13986 		break;
13987 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13988 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13989 		break;
13990 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13991 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13992 		break;
13993 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13994 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13995 		break;
13996 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13997 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13998 		break;
13999 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14000 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14001 		break;
14002 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14003 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14004 		break;
14005 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14006 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14007 		break;
14008 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14009 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14010 		break;
14011 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14012 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14013 		break;
14014 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14015 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14016 		break;
14017 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14018 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14019 		break;
14020 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14021 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14022 		break;
14023 	default:
14024 		WARN_ON_ONCE(1);
14025 		return -ERANGE;
14026 	}
14027 
14028 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14029 		-EFAULT : 0;
14030 }
14031 
14032 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14033 {
14034 	struct mii_ioctl_data *data = if_mii(ifr);
14035 	struct tg3 *tp = netdev_priv(dev);
14036 	int err;
14037 
14038 	if (tg3_flag(tp, USE_PHYLIB)) {
14039 		struct phy_device *phydev;
14040 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14041 			return -EAGAIN;
14042 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14043 		return phy_mii_ioctl(phydev, ifr, cmd);
14044 	}
14045 
14046 	switch (cmd) {
14047 	case SIOCGMIIPHY:
14048 		data->phy_id = tp->phy_addr;
14049 
14050 		fallthrough;
14051 	case SIOCGMIIREG: {
14052 		u32 mii_regval;
14053 
14054 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14055 			break;			/* We have no PHY */
14056 
14057 		if (!netif_running(dev))
14058 			return -EAGAIN;
14059 
14060 		spin_lock_bh(&tp->lock);
14061 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14062 				    data->reg_num & 0x1f, &mii_regval);
14063 		spin_unlock_bh(&tp->lock);
14064 
14065 		data->val_out = mii_regval;
14066 
14067 		return err;
14068 	}
14069 
14070 	case SIOCSMIIREG:
14071 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14072 			break;			/* We have no PHY */
14073 
14074 		if (!netif_running(dev))
14075 			return -EAGAIN;
14076 
14077 		spin_lock_bh(&tp->lock);
14078 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14079 				     data->reg_num & 0x1f, data->val_in);
14080 		spin_unlock_bh(&tp->lock);
14081 
14082 		return err;
14083 
14084 	case SIOCSHWTSTAMP:
14085 		return tg3_hwtstamp_set(dev, ifr);
14086 
14087 	case SIOCGHWTSTAMP:
14088 		return tg3_hwtstamp_get(dev, ifr);
14089 
14090 	default:
14091 		/* do nothing */
14092 		break;
14093 	}
14094 	return -EOPNOTSUPP;
14095 }
14096 
14097 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14098 {
14099 	struct tg3 *tp = netdev_priv(dev);
14100 
14101 	memcpy(ec, &tp->coal, sizeof(*ec));
14102 	return 0;
14103 }
14104 
14105 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14106 {
14107 	struct tg3 *tp = netdev_priv(dev);
14108 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14109 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14110 
14111 	if (!tg3_flag(tp, 5705_PLUS)) {
14112 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14113 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14114 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14115 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14116 	}
14117 
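	/* On 5705 and newer parts the IRQ-deferred coalescing timers and the
	 * statistics-block timer don't exist, so their limits stay at zero
	 * and any nonzero request is rejected below.
	 */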
14118 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14119 	    (!ec->rx_coalesce_usecs) ||
14120 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14121 	    (!ec->tx_coalesce_usecs) ||
14122 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14123 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14124 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14125 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14126 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14127 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14128 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14129 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14130 		return -EINVAL;
14131 
14132 	/* Only copy relevant parameters, ignore all others. */
14133 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14134 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14135 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14136 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14137 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14138 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14139 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14140 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14141 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14142 
14143 	if (netif_running(dev)) {
14144 		tg3_full_lock(tp, 0);
14145 		__tg3_set_coalesce(tp, &tp->coal);
14146 		tg3_full_unlock(tp);
14147 	}
14148 	return 0;
14149 }
14150 
14151 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14152 {
14153 	struct tg3 *tp = netdev_priv(dev);
14154 
14155 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14156 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14157 		return -EOPNOTSUPP;
14158 	}
14159 
14160 	if (edata->advertised != tp->eee.advertised) {
14161 		netdev_warn(tp->dev,
14162 			    "Direct manipulation of EEE advertisement is not supported\n");
14163 		return -EINVAL;
14164 	}
14165 
14166 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14167 		netdev_warn(tp->dev,
14168 			    "Maximum supported Tx LPI timer is %#x\n",
14169 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14170 		return -EINVAL;
14171 	}
14172 
14173 	tp->eee = *edata;
14174 
14175 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14176 	tg3_warn_mgmt_link_flap(tp);
14177 
14178 	if (netif_running(tp->dev)) {
14179 		tg3_full_lock(tp, 0);
14180 		tg3_setup_eee(tp);
14181 		tg3_phy_reset(tp);
14182 		tg3_full_unlock(tp);
14183 	}
14184 
14185 	return 0;
14186 }
14187 
14188 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14189 {
14190 	struct tg3 *tp = netdev_priv(dev);
14191 
14192 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14193 		netdev_warn(tp->dev,
14194 			    "Board does not support EEE!\n");
14195 		return -EOPNOTSUPP;
14196 	}
14197 
14198 	*edata = tp->eee;
14199 	return 0;
14200 }
14201 
14202 static const struct ethtool_ops tg3_ethtool_ops = {
14203 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14204 				     ETHTOOL_COALESCE_MAX_FRAMES |
14205 				     ETHTOOL_COALESCE_USECS_IRQ |
14206 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14207 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14208 	.get_drvinfo		= tg3_get_drvinfo,
14209 	.get_regs_len		= tg3_get_regs_len,
14210 	.get_regs		= tg3_get_regs,
14211 	.get_wol		= tg3_get_wol,
14212 	.set_wol		= tg3_set_wol,
14213 	.get_msglevel		= tg3_get_msglevel,
14214 	.set_msglevel		= tg3_set_msglevel,
14215 	.nway_reset		= tg3_nway_reset,
14216 	.get_link		= ethtool_op_get_link,
14217 	.get_eeprom_len		= tg3_get_eeprom_len,
14218 	.get_eeprom		= tg3_get_eeprom,
14219 	.set_eeprom		= tg3_set_eeprom,
14220 	.get_ringparam		= tg3_get_ringparam,
14221 	.set_ringparam		= tg3_set_ringparam,
14222 	.get_pauseparam		= tg3_get_pauseparam,
14223 	.set_pauseparam		= tg3_set_pauseparam,
14224 	.self_test		= tg3_self_test,
14225 	.get_strings		= tg3_get_strings,
14226 	.set_phys_id		= tg3_set_phys_id,
14227 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14228 	.get_coalesce		= tg3_get_coalesce,
14229 	.set_coalesce		= tg3_set_coalesce,
14230 	.get_sset_count		= tg3_get_sset_count,
14231 	.get_rxnfc		= tg3_get_rxnfc,
14232 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14233 	.get_rxfh		= tg3_get_rxfh,
14234 	.set_rxfh		= tg3_set_rxfh,
14235 	.get_channels		= tg3_get_channels,
14236 	.set_channels		= tg3_set_channels,
14237 	.get_ts_info		= tg3_get_ts_info,
14238 	.get_eee		= tg3_get_eee,
14239 	.set_eee		= tg3_set_eee,
14240 	.get_link_ksettings	= tg3_get_link_ksettings,
14241 	.set_link_ksettings	= tg3_set_link_ksettings,
14242 };
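
/* Since the ethtool core (v5.7+) validates user coalesce requests
 * against supported_coalesce_params before calling ->set_coalesce(),
 * a request that touches any field outside the five declared above
 * (e.g. "ethtool -C ethX adaptive-rx on") is rejected with
 * -EOPNOTSUPP without ever reaching tg3_set_coalesce().
 */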
14243 
14244 static void tg3_get_stats64(struct net_device *dev,
14245 			    struct rtnl_link_stats64 *stats)
14246 {
14247 	struct tg3 *tp = netdev_priv(dev);
14248 
14249 	spin_lock_bh(&tp->lock);
14250 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14251 		*stats = tp->net_stats_prev;
14252 		spin_unlock_bh(&tp->lock);
14253 		return;
14254 	}
14255 
14256 	tg3_get_nstats(tp, stats);
14257 	spin_unlock_bh(&tp->lock);
14258 }
14259 
14260 static void tg3_set_rx_mode(struct net_device *dev)
14261 {
14262 	struct tg3 *tp = netdev_priv(dev);
14263 
14264 	if (!netif_running(dev))
14265 		return;
14266 
14267 	tg3_full_lock(tp, 0);
14268 	__tg3_set_rx_mode(dev);
14269 	tg3_full_unlock(tp);
14270 }
14271 
14272 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14273 			       int new_mtu)
14274 {
14275 	dev->mtu = new_mtu;
14276 
14277 	if (new_mtu > ETH_DATA_LEN) {
14278 		if (tg3_flag(tp, 5780_CLASS)) {
14279 			netdev_update_features(dev);
14280 			tg3_flag_clear(tp, TSO_CAPABLE);
14281 		} else {
14282 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14283 		}
14284 	} else {
14285 		if (tg3_flag(tp, 5780_CLASS)) {
14286 			tg3_flag_set(tp, TSO_CAPABLE);
14287 			netdev_update_features(dev);
14288 		}
14289 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14290 	}
14291 }
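
/* On 5780-class chips jumbo frames and TSO appear to be mutually
 * exclusive: note TSO_CAPABLE being cleared when the MTU grows past
 * ETH_DATA_LEN and restored when it shrinks back, with
 * netdev_update_features() re-evaluating the feature set either way.
 * Other chips simply toggle JUMBO_RING_ENABLE.
 */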
14292 
14293 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14294 {
14295 	struct tg3 *tp = netdev_priv(dev);
14296 	int err;
14297 	bool reset_phy = false;
14298 
14299 	if (!netif_running(dev)) {
14300 		/* We'll just catch it later when the
14301 		 * device is brought up.
14302 		 */
14303 		tg3_set_mtu(dev, tp, new_mtu);
14304 		return 0;
14305 	}
14306 
14307 	tg3_phy_stop(tp);
14308 
14309 	tg3_netif_stop(tp);
14310 
14311 	tg3_set_mtu(dev, tp, new_mtu);
14312 
14313 	tg3_full_lock(tp, 1);
14314 
14315 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14316 
14317 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14318 	 * breaks all requests to 256 bytes.
14319 	 */
14320 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14321 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14322 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14323 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14324 		reset_phy = true;
14325 
14326 	err = tg3_restart_hw(tp, reset_phy);
14327 
14328 	if (!err)
14329 		tg3_netif_start(tp);
14330 
14331 	tg3_full_unlock(tp);
14332 
14333 	if (!err)
14334 		tg3_phy_start(tp);
14335 
14336 	return err;
14337 }
14338 
14339 static const struct net_device_ops tg3_netdev_ops = {
14340 	.ndo_open		= tg3_open,
14341 	.ndo_stop		= tg3_close,
14342 	.ndo_start_xmit		= tg3_start_xmit,
14343 	.ndo_get_stats64	= tg3_get_stats64,
14344 	.ndo_validate_addr	= eth_validate_addr,
14345 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14346 	.ndo_set_mac_address	= tg3_set_mac_addr,
14347 	.ndo_do_ioctl		= tg3_ioctl,
14348 	.ndo_tx_timeout		= tg3_tx_timeout,
14349 	.ndo_change_mtu		= tg3_change_mtu,
14350 	.ndo_fix_features	= tg3_fix_features,
14351 	.ndo_set_features	= tg3_set_features,
14352 #ifdef CONFIG_NET_POLL_CONTROLLER
14353 	.ndo_poll_controller	= tg3_poll_controller,
14354 #endif
14355 };
14356 
14357 static void tg3_get_eeprom_size(struct tg3 *tp)
14358 {
14359 	u32 cursize, val, magic;
14360 
14361 	tp->nvram_size = EEPROM_CHIP_SIZE;
14362 
14363 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14364 		return;
14365 
14366 	if ((magic != TG3_EEPROM_MAGIC) &&
14367 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14368 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14369 		return;
14370 
14371 	/*
14372 	 * Size the chip by reading offsets at increasing powers of two.
14373 	 * When we encounter our validation signature, we know the addressing
14374 	 * has wrapped around, and thus have our chip size.
14375 	 */
14376 	cursize = 0x10;
14377 
14378 	while (cursize < tp->nvram_size) {
14379 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14380 			return;
14381 
14382 		if (val == magic)
14383 			break;
14384 
14385 		cursize <<= 1;
14386 	}
14387 
14388 	tp->nvram_size = cursize;
14389 }
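
/* Worked example of the wrap-around probe above, assuming a
 * hypothetical 64KB (0x10000-byte) part: the reads at 0x10, 0x20, ...
 * 0x8000 return ordinary data, but the read at offset 0x10000 wraps
 * back to offset 0 and returns the magic signature again, so cursize
 * (and hence nvram_size) ends up as 0x10000.
 */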
14390 
14391 static void tg3_get_nvram_size(struct tg3 *tp)
14392 {
14393 	u32 val;
14394 
14395 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14396 		return;
14397 
14398 	/* Selfboot format */
14399 	if (val != TG3_EEPROM_MAGIC) {
14400 		tg3_get_eeprom_size(tp);
14401 		return;
14402 	}
14403 
14404 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14405 		if (val != 0) {
14406 			/* This is confusing.  We want to operate on the
14407 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14408 			 * call will read from NVRAM and byteswap the data
14409 			 * according to the byteswapping settings for all
14410 			 * other register accesses.  This ensures the data we
14411 			 * want will always reside in the lower 16-bits.
14412 			 * However, the data in NVRAM is in LE format, which
14413 			 * means the data from the NVRAM read will always be
14414 			 * opposite the endianness of the CPU.  The 16-bit
14415 			 * byteswap then brings the data to CPU endianness.
14416 			 */
14417 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14418 			return;
14419 		}
14420 	}
14421 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14422 }
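
/* Illustration of the swab16() above: the halfword at 0xf2 holds the
 * size in KB, so a (CPU-order) field value of 512 yields
 * 512 * 1024 bytes, i.e. TG3_NVRAM_SIZE_512KB.  The swap is needed
 * because tg3_nvram_read() has already byteswapped the little-endian
 * NVRAM word, leaving that 16-bit field byte-reversed.
 */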
14423 
14424 static void tg3_get_nvram_info(struct tg3 *tp)
14425 {
14426 	u32 nvcfg1;
14427 
14428 	nvcfg1 = tr32(NVRAM_CFG1);
14429 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14430 		tg3_flag_set(tp, FLASH);
14431 	} else {
14432 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14433 		tw32(NVRAM_CFG1, nvcfg1);
14434 	}
14435 
14436 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14437 	    tg3_flag(tp, 5780_CLASS)) {
14438 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14439 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14440 			tp->nvram_jedecnum = JEDEC_ATMEL;
14441 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14442 			tg3_flag_set(tp, NVRAM_BUFFERED);
14443 			break;
14444 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14445 			tp->nvram_jedecnum = JEDEC_ATMEL;
14446 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14447 			break;
14448 		case FLASH_VENDOR_ATMEL_EEPROM:
14449 			tp->nvram_jedecnum = JEDEC_ATMEL;
14450 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14451 			tg3_flag_set(tp, NVRAM_BUFFERED);
14452 			break;
14453 		case FLASH_VENDOR_ST:
14454 			tp->nvram_jedecnum = JEDEC_ST;
14455 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14456 			tg3_flag_set(tp, NVRAM_BUFFERED);
14457 			break;
14458 		case FLASH_VENDOR_SAIFUN:
14459 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14460 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14461 			break;
14462 		case FLASH_VENDOR_SST_SMALL:
14463 		case FLASH_VENDOR_SST_LARGE:
14464 			tp->nvram_jedecnum = JEDEC_SST;
14465 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14466 			break;
14467 		}
14468 	} else {
14469 		tp->nvram_jedecnum = JEDEC_ATMEL;
14470 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14471 		tg3_flag_set(tp, NVRAM_BUFFERED);
14472 	}
14473 }
14474 
14475 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14476 {
14477 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14478 	case FLASH_5752PAGE_SIZE_256:
14479 		tp->nvram_pagesize = 256;
14480 		break;
14481 	case FLASH_5752PAGE_SIZE_512:
14482 		tp->nvram_pagesize = 512;
14483 		break;
14484 	case FLASH_5752PAGE_SIZE_1K:
14485 		tp->nvram_pagesize = 1024;
14486 		break;
14487 	case FLASH_5752PAGE_SIZE_2K:
14488 		tp->nvram_pagesize = 2048;
14489 		break;
14490 	case FLASH_5752PAGE_SIZE_4K:
14491 		tp->nvram_pagesize = 4096;
14492 		break;
14493 	case FLASH_5752PAGE_SIZE_264:
14494 		tp->nvram_pagesize = 264;
14495 		break;
14496 	case FLASH_5752PAGE_SIZE_528:
14497 		tp->nvram_pagesize = 528;
14498 		break;
14499 	}
14500 }
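
/* The odd 264- and 528-byte geometries are Atmel AT45DB "DataFlash"
 * pages (256+8 and 512+8 bytes).  They are the only sizes that keep
 * NVRAM address translation in play; the callers below set
 * NO_NVRAM_ADDR_TRANS for every other page size.
 */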
14501 
14502 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14503 {
14504 	u32 nvcfg1;
14505 
14506 	nvcfg1 = tr32(NVRAM_CFG1);
14507 
14508 	/* NVRAM protection for TPM */
14509 	if (nvcfg1 & (1 << 27))
14510 		tg3_flag_set(tp, PROTECTED_NVRAM);
14511 
14512 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14513 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14514 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14515 		tp->nvram_jedecnum = JEDEC_ATMEL;
14516 		tg3_flag_set(tp, NVRAM_BUFFERED);
14517 		break;
14518 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14519 		tp->nvram_jedecnum = JEDEC_ATMEL;
14520 		tg3_flag_set(tp, NVRAM_BUFFERED);
14521 		tg3_flag_set(tp, FLASH);
14522 		break;
14523 	case FLASH_5752VENDOR_ST_M45PE10:
14524 	case FLASH_5752VENDOR_ST_M45PE20:
14525 	case FLASH_5752VENDOR_ST_M45PE40:
14526 		tp->nvram_jedecnum = JEDEC_ST;
14527 		tg3_flag_set(tp, NVRAM_BUFFERED);
14528 		tg3_flag_set(tp, FLASH);
14529 		break;
14530 	}
14531 
14532 	if (tg3_flag(tp, FLASH)) {
14533 		tg3_nvram_get_pagesize(tp, nvcfg1);
14534 	} else {
14535 		/* For eeprom, set pagesize to maximum eeprom size */
14536 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14537 
14538 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14539 		tw32(NVRAM_CFG1, nvcfg1);
14540 	}
14541 }
14542 
14543 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14544 {
14545 	u32 nvcfg1, protect = 0;
14546 
14547 	nvcfg1 = tr32(NVRAM_CFG1);
14548 
14549 	/* NVRAM protection for TPM */
14550 	if (nvcfg1 & (1 << 27)) {
14551 		tg3_flag_set(tp, PROTECTED_NVRAM);
14552 		protect = 1;
14553 	}
14554 
14555 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14556 	switch (nvcfg1) {
14557 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14558 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14559 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14560 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14561 		tp->nvram_jedecnum = JEDEC_ATMEL;
14562 		tg3_flag_set(tp, NVRAM_BUFFERED);
14563 		tg3_flag_set(tp, FLASH);
14564 		tp->nvram_pagesize = 264;
14565 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14566 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14567 			tp->nvram_size = (protect ? 0x3e200 :
14568 					  TG3_NVRAM_SIZE_512KB);
14569 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14570 			tp->nvram_size = (protect ? 0x1f200 :
14571 					  TG3_NVRAM_SIZE_256KB);
14572 		else
14573 			tp->nvram_size = (protect ? 0x1f200 :
14574 					  TG3_NVRAM_SIZE_128KB);
14575 		break;
14576 	case FLASH_5752VENDOR_ST_M45PE10:
14577 	case FLASH_5752VENDOR_ST_M45PE20:
14578 	case FLASH_5752VENDOR_ST_M45PE40:
14579 		tp->nvram_jedecnum = JEDEC_ST;
14580 		tg3_flag_set(tp, NVRAM_BUFFERED);
14581 		tg3_flag_set(tp, FLASH);
14582 		tp->nvram_pagesize = 256;
14583 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14584 			tp->nvram_size = (protect ?
14585 					  TG3_NVRAM_SIZE_64KB :
14586 					  TG3_NVRAM_SIZE_128KB);
14587 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14588 			tp->nvram_size = (protect ?
14589 					  TG3_NVRAM_SIZE_64KB :
14590 					  TG3_NVRAM_SIZE_256KB);
14591 		else
14592 			tp->nvram_size = (protect ?
14593 					  TG3_NVRAM_SIZE_128KB :
14594 					  TG3_NVRAM_SIZE_512KB);
14595 		break;
14596 	}
14597 }
14598 
14599 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14600 {
14601 	u32 nvcfg1;
14602 
14603 	nvcfg1 = tr32(NVRAM_CFG1);
14604 
14605 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14606 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14607 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14608 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14609 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14610 		tp->nvram_jedecnum = JEDEC_ATMEL;
14611 		tg3_flag_set(tp, NVRAM_BUFFERED);
14612 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14613 
14614 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14615 		tw32(NVRAM_CFG1, nvcfg1);
14616 		break;
14617 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14618 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14619 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14620 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14621 		tp->nvram_jedecnum = JEDEC_ATMEL;
14622 		tg3_flag_set(tp, NVRAM_BUFFERED);
14623 		tg3_flag_set(tp, FLASH);
14624 		tp->nvram_pagesize = 264;
14625 		break;
14626 	case FLASH_5752VENDOR_ST_M45PE10:
14627 	case FLASH_5752VENDOR_ST_M45PE20:
14628 	case FLASH_5752VENDOR_ST_M45PE40:
14629 		tp->nvram_jedecnum = JEDEC_ST;
14630 		tg3_flag_set(tp, NVRAM_BUFFERED);
14631 		tg3_flag_set(tp, FLASH);
14632 		tp->nvram_pagesize = 256;
14633 		break;
14634 	}
14635 }
14636 
14637 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14638 {
14639 	u32 nvcfg1, protect = 0;
14640 
14641 	nvcfg1 = tr32(NVRAM_CFG1);
14642 
14643 	/* NVRAM protection for TPM */
14644 	if (nvcfg1 & (1 << 27)) {
14645 		tg3_flag_set(tp, PROTECTED_NVRAM);
14646 		protect = 1;
14647 	}
14648 
14649 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14650 	switch (nvcfg1) {
14651 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14652 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14653 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14654 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14655 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14656 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14657 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14658 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14659 		tp->nvram_jedecnum = JEDEC_ATMEL;
14660 		tg3_flag_set(tp, NVRAM_BUFFERED);
14661 		tg3_flag_set(tp, FLASH);
14662 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14663 		tp->nvram_pagesize = 256;
14664 		break;
14665 	case FLASH_5761VENDOR_ST_A_M45PE20:
14666 	case FLASH_5761VENDOR_ST_A_M45PE40:
14667 	case FLASH_5761VENDOR_ST_A_M45PE80:
14668 	case FLASH_5761VENDOR_ST_A_M45PE16:
14669 	case FLASH_5761VENDOR_ST_M_M45PE20:
14670 	case FLASH_5761VENDOR_ST_M_M45PE40:
14671 	case FLASH_5761VENDOR_ST_M_M45PE80:
14672 	case FLASH_5761VENDOR_ST_M_M45PE16:
14673 		tp->nvram_jedecnum = JEDEC_ST;
14674 		tg3_flag_set(tp, NVRAM_BUFFERED);
14675 		tg3_flag_set(tp, FLASH);
14676 		tp->nvram_pagesize = 256;
14677 		break;
14678 	}
14679 
14680 	if (protect) {
14681 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14682 	} else {
14683 		switch (nvcfg1) {
14684 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14685 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14686 		case FLASH_5761VENDOR_ST_A_M45PE16:
14687 		case FLASH_5761VENDOR_ST_M_M45PE16:
14688 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14689 			break;
14690 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14691 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14692 		case FLASH_5761VENDOR_ST_A_M45PE80:
14693 		case FLASH_5761VENDOR_ST_M_M45PE80:
14694 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14695 			break;
14696 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14697 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14698 		case FLASH_5761VENDOR_ST_A_M45PE40:
14699 		case FLASH_5761VENDOR_ST_M_M45PE40:
14700 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14701 			break;
14702 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14703 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14704 		case FLASH_5761VENDOR_ST_A_M45PE20:
14705 		case FLASH_5761VENDOR_ST_M_M45PE20:
14706 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14707 			break;
14708 		}
14709 	}
14710 }
14711 
14712 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14713 {
14714 	tp->nvram_jedecnum = JEDEC_ATMEL;
14715 	tg3_flag_set(tp, NVRAM_BUFFERED);
14716 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14717 }
14718 
14719 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14720 {
14721 	u32 nvcfg1;
14722 
14723 	nvcfg1 = tr32(NVRAM_CFG1);
14724 
14725 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14726 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14727 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14728 		tp->nvram_jedecnum = JEDEC_ATMEL;
14729 		tg3_flag_set(tp, NVRAM_BUFFERED);
14730 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14731 
14732 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14733 		tw32(NVRAM_CFG1, nvcfg1);
14734 		return;
14735 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14736 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14737 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14738 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14739 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14740 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14741 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14742 		tp->nvram_jedecnum = JEDEC_ATMEL;
14743 		tg3_flag_set(tp, NVRAM_BUFFERED);
14744 		tg3_flag_set(tp, FLASH);
14745 
14746 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14747 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14748 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14749 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14750 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14751 			break;
14752 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14753 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14754 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14755 			break;
14756 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14757 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14758 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14759 			break;
14760 		}
14761 		break;
14762 	case FLASH_5752VENDOR_ST_M45PE10:
14763 	case FLASH_5752VENDOR_ST_M45PE20:
14764 	case FLASH_5752VENDOR_ST_M45PE40:
14765 		tp->nvram_jedecnum = JEDEC_ST;
14766 		tg3_flag_set(tp, NVRAM_BUFFERED);
14767 		tg3_flag_set(tp, FLASH);
14768 
14769 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14770 		case FLASH_5752VENDOR_ST_M45PE10:
14771 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14772 			break;
14773 		case FLASH_5752VENDOR_ST_M45PE20:
14774 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14775 			break;
14776 		case FLASH_5752VENDOR_ST_M45PE40:
14777 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14778 			break;
14779 		}
14780 		break;
14781 	default:
14782 		tg3_flag_set(tp, NO_NVRAM);
14783 		return;
14784 	}
14785 
14786 	tg3_nvram_get_pagesize(tp, nvcfg1);
14787 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14788 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14789 }
14790 
14791 
14792 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14793 {
14794 	u32 nvcfg1;
14795 
14796 	nvcfg1 = tr32(NVRAM_CFG1);
14797 
14798 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14799 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14800 	case FLASH_5717VENDOR_MICRO_EEPROM:
14801 		tp->nvram_jedecnum = JEDEC_ATMEL;
14802 		tg3_flag_set(tp, NVRAM_BUFFERED);
14803 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14804 
14805 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14806 		tw32(NVRAM_CFG1, nvcfg1);
14807 		return;
14808 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14809 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14810 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14811 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14812 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14813 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14814 	case FLASH_5717VENDOR_ATMEL_45USPT:
14815 		tp->nvram_jedecnum = JEDEC_ATMEL;
14816 		tg3_flag_set(tp, NVRAM_BUFFERED);
14817 		tg3_flag_set(tp, FLASH);
14818 
14819 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14820 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14821 			/* Detect size with tg3_nvram_get_size() */
14822 			break;
14823 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14824 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14825 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14826 			break;
14827 		default:
14828 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14829 			break;
14830 		}
14831 		break;
14832 	case FLASH_5717VENDOR_ST_M_M25PE10:
14833 	case FLASH_5717VENDOR_ST_A_M25PE10:
14834 	case FLASH_5717VENDOR_ST_M_M45PE10:
14835 	case FLASH_5717VENDOR_ST_A_M45PE10:
14836 	case FLASH_5717VENDOR_ST_M_M25PE20:
14837 	case FLASH_5717VENDOR_ST_A_M25PE20:
14838 	case FLASH_5717VENDOR_ST_M_M45PE20:
14839 	case FLASH_5717VENDOR_ST_A_M45PE20:
14840 	case FLASH_5717VENDOR_ST_25USPT:
14841 	case FLASH_5717VENDOR_ST_45USPT:
14842 		tp->nvram_jedecnum = JEDEC_ST;
14843 		tg3_flag_set(tp, NVRAM_BUFFERED);
14844 		tg3_flag_set(tp, FLASH);
14845 
14846 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14847 		case FLASH_5717VENDOR_ST_M_M25PE20:
14848 		case FLASH_5717VENDOR_ST_M_M45PE20:
14849 			/* Detect size with tg3_nvram_get_size() */
14850 			break;
14851 		case FLASH_5717VENDOR_ST_A_M25PE20:
14852 		case FLASH_5717VENDOR_ST_A_M45PE20:
14853 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14854 			break;
14855 		default:
14856 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14857 			break;
14858 		}
14859 		break;
14860 	default:
14861 		tg3_flag_set(tp, NO_NVRAM);
14862 		return;
14863 	}
14864 
14865 	tg3_nvram_get_pagesize(tp, nvcfg1);
14866 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14867 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14868 }
14869 
14870 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14871 {
14872 	u32 nvcfg1, nvmpinstrp, nv_status;
14873 
14874 	nvcfg1 = tr32(NVRAM_CFG1);
14875 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14876 
14877 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14878 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14879 			tg3_flag_set(tp, NO_NVRAM);
14880 			return;
14881 		}
14882 
14883 		switch (nvmpinstrp) {
14884 		case FLASH_5762_MX25L_100:
14885 		case FLASH_5762_MX25L_200:
14886 		case FLASH_5762_MX25L_400:
14887 		case FLASH_5762_MX25L_800:
14888 		case FLASH_5762_MX25L_160_320:
14889 			tp->nvram_pagesize = 4096;
14890 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14891 			tg3_flag_set(tp, NVRAM_BUFFERED);
14892 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14893 			tg3_flag_set(tp, FLASH);
14894 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14895 			tp->nvram_size =
14896 				(1 << (nv_status >> AUTOSENSE_DEVID &
14897 						AUTOSENSE_DEVID_MASK)
14898 					<< AUTOSENSE_SIZE_IN_MB);
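			/* Illustrative arithmetic (constants are in tg3.h):
			 * the autosensed device-ID field selects a power of
			 * two, e.g. a field value of 1 gives 1 << 1 = 2,
			 * which the final shift scales to a 2MB part,
			 * assuming AUTOSENSE_SIZE_IN_MB is the 1MB exponent.
			 */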
14899 			return;
14900 
14901 		case FLASH_5762_EEPROM_HD:
14902 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14903 			break;
14904 		case FLASH_5762_EEPROM_LD:
14905 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14906 			break;
14907 		case FLASH_5720VENDOR_M_ST_M45PE20:
14908 			/* This pinstrap supports multiple sizes, so force it
14909 			 * to read the actual size from location 0xf0.
14910 			 */
14911 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14912 			break;
14913 		}
14914 	}
14915 
14916 	switch (nvmpinstrp) {
14917 	case FLASH_5720_EEPROM_HD:
14918 	case FLASH_5720_EEPROM_LD:
14919 		tp->nvram_jedecnum = JEDEC_ATMEL;
14920 		tg3_flag_set(tp, NVRAM_BUFFERED);
14921 
14922 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14923 		tw32(NVRAM_CFG1, nvcfg1);
14924 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14925 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14926 		else
14927 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14928 		return;
14929 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14930 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14931 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14932 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14933 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14934 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14935 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14936 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14937 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14938 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14939 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14940 	case FLASH_5720VENDOR_ATMEL_45USPT:
14941 		tp->nvram_jedecnum = JEDEC_ATMEL;
14942 		tg3_flag_set(tp, NVRAM_BUFFERED);
14943 		tg3_flag_set(tp, FLASH);
14944 
14945 		switch (nvmpinstrp) {
14946 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14947 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14948 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14949 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14950 			break;
14951 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14952 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14953 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14954 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14955 			break;
14956 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14957 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14958 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14959 			break;
14960 		default:
14961 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14962 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14963 			break;
14964 		}
14965 		break;
14966 	case FLASH_5720VENDOR_M_ST_M25PE10:
14967 	case FLASH_5720VENDOR_M_ST_M45PE10:
14968 	case FLASH_5720VENDOR_A_ST_M25PE10:
14969 	case FLASH_5720VENDOR_A_ST_M45PE10:
14970 	case FLASH_5720VENDOR_M_ST_M25PE20:
14971 	case FLASH_5720VENDOR_M_ST_M45PE20:
14972 	case FLASH_5720VENDOR_A_ST_M25PE20:
14973 	case FLASH_5720VENDOR_A_ST_M45PE20:
14974 	case FLASH_5720VENDOR_M_ST_M25PE40:
14975 	case FLASH_5720VENDOR_M_ST_M45PE40:
14976 	case FLASH_5720VENDOR_A_ST_M25PE40:
14977 	case FLASH_5720VENDOR_A_ST_M45PE40:
14978 	case FLASH_5720VENDOR_M_ST_M25PE80:
14979 	case FLASH_5720VENDOR_M_ST_M45PE80:
14980 	case FLASH_5720VENDOR_A_ST_M25PE80:
14981 	case FLASH_5720VENDOR_A_ST_M45PE80:
14982 	case FLASH_5720VENDOR_ST_25USPT:
14983 	case FLASH_5720VENDOR_ST_45USPT:
14984 		tp->nvram_jedecnum = JEDEC_ST;
14985 		tg3_flag_set(tp, NVRAM_BUFFERED);
14986 		tg3_flag_set(tp, FLASH);
14987 
14988 		switch (nvmpinstrp) {
14989 		case FLASH_5720VENDOR_M_ST_M25PE20:
14990 		case FLASH_5720VENDOR_M_ST_M45PE20:
14991 		case FLASH_5720VENDOR_A_ST_M25PE20:
14992 		case FLASH_5720VENDOR_A_ST_M45PE20:
14993 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14994 			break;
14995 		case FLASH_5720VENDOR_M_ST_M25PE40:
14996 		case FLASH_5720VENDOR_M_ST_M45PE40:
14997 		case FLASH_5720VENDOR_A_ST_M25PE40:
14998 		case FLASH_5720VENDOR_A_ST_M45PE40:
14999 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15000 			break;
15001 		case FLASH_5720VENDOR_M_ST_M25PE80:
15002 		case FLASH_5720VENDOR_M_ST_M45PE80:
15003 		case FLASH_5720VENDOR_A_ST_M25PE80:
15004 		case FLASH_5720VENDOR_A_ST_M45PE80:
15005 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15006 			break;
15007 		default:
15008 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15009 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15010 			break;
15011 		}
15012 		break;
15013 	default:
15014 		tg3_flag_set(tp, NO_NVRAM);
15015 		return;
15016 	}
15017 
15018 	tg3_nvram_get_pagesize(tp, nvcfg1);
15019 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15020 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15021 
15022 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15023 		u32 val;
15024 
15025 		if (tg3_nvram_read(tp, 0, &val))
15026 			return;
15027 
15028 		if (val != TG3_EEPROM_MAGIC &&
15029 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15030 			tg3_flag_set(tp, NO_NVRAM);
15031 	}
15032 }
15033 
15034 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15035 static void tg3_nvram_init(struct tg3 *tp)
15036 {
15037 	if (tg3_flag(tp, IS_SSB_CORE)) {
15038 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
15039 		tg3_flag_clear(tp, NVRAM);
15040 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15041 		tg3_flag_set(tp, NO_NVRAM);
15042 		return;
15043 	}
15044 
15045 	tw32_f(GRC_EEPROM_ADDR,
15046 	     (EEPROM_ADDR_FSM_RESET |
15047 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15048 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15049 
15050 	msleep(1);
15051 
15052 	/* Enable seeprom accesses. */
15053 	tw32_f(GRC_LOCAL_CTRL,
15054 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15055 	udelay(100);
15056 
15057 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15058 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15059 		tg3_flag_set(tp, NVRAM);
15060 
15061 		if (tg3_nvram_lock(tp)) {
15062 			netdev_warn(tp->dev,
15063 				    "Cannot get nvram lock, %s failed\n",
15064 				    __func__);
15065 			return;
15066 		}
15067 		tg3_enable_nvram_access(tp);
15068 
15069 		tp->nvram_size = 0;
15070 
15071 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15072 			tg3_get_5752_nvram_info(tp);
15073 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15074 			tg3_get_5755_nvram_info(tp);
15075 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15076 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15077 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15078 			tg3_get_5787_nvram_info(tp);
15079 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15080 			tg3_get_5761_nvram_info(tp);
15081 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15082 			tg3_get_5906_nvram_info(tp);
15083 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15084 			 tg3_flag(tp, 57765_CLASS))
15085 			tg3_get_57780_nvram_info(tp);
15086 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15087 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15088 			tg3_get_5717_nvram_info(tp);
15089 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15090 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15091 			tg3_get_5720_nvram_info(tp);
15092 		else
15093 			tg3_get_nvram_info(tp);
15094 
15095 		if (tp->nvram_size == 0)
15096 			tg3_get_nvram_size(tp);
15097 
15098 		tg3_disable_nvram_access(tp);
15099 		tg3_nvram_unlock(tp);
15100 
15101 	} else {
15102 		tg3_flag_clear(tp, NVRAM);
15103 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15104 
15105 		tg3_get_eeprom_size(tp);
15106 	}
15107 }
15108 
15109 struct subsys_tbl_ent {
15110 	u16 subsys_vendor, subsys_devid;
15111 	u32 phy_id;
15112 };
15113 
15114 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15115 	/* Broadcom boards. */
15116 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15117 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15118 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15119 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15120 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15121 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15122 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15123 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15124 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15125 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15126 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15127 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15128 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15129 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15130 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15131 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15132 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15133 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15134 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15135 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15136 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15137 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15138 
15139 	/* 3com boards. */
15140 	{ TG3PCI_SUBVENDOR_ID_3COM,
15141 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15142 	{ TG3PCI_SUBVENDOR_ID_3COM,
15143 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15144 	{ TG3PCI_SUBVENDOR_ID_3COM,
15145 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15146 	{ TG3PCI_SUBVENDOR_ID_3COM,
15147 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15148 	{ TG3PCI_SUBVENDOR_ID_3COM,
15149 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15150 
15151 	/* DELL boards. */
15152 	{ TG3PCI_SUBVENDOR_ID_DELL,
15153 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15154 	{ TG3PCI_SUBVENDOR_ID_DELL,
15155 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15156 	{ TG3PCI_SUBVENDOR_ID_DELL,
15157 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15158 	{ TG3PCI_SUBVENDOR_ID_DELL,
15159 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15160 
15161 	/* Compaq boards. */
15162 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15163 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15164 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15165 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15166 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15167 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15168 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15169 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15170 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15171 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15172 
15173 	/* IBM boards. */
15174 	{ TG3PCI_SUBVENDOR_ID_IBM,
15175 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15176 };
15177 
15178 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15179 {
15180 	int i;
15181 
15182 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15183 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15184 		     tp->pdev->subsystem_vendor) &&
15185 		    (subsys_id_to_phy_id[i].subsys_devid ==
15186 		     tp->pdev->subsystem_device))
15187 			return &subsys_id_to_phy_id[i];
15188 	}
15189 	return NULL;
15190 }
15191 
15192 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15193 {
15194 	u32 val;
15195 
15196 	tp->phy_id = TG3_PHY_ID_INVALID;
15197 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15198 
15199 	/* Assume an onboard, WOL-capable device by default. */
15200 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15201 	tg3_flag_set(tp, WOL_CAP);
15202 
15203 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15204 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15205 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15206 			tg3_flag_set(tp, IS_NIC);
15207 		}
15208 		val = tr32(VCPU_CFGSHDW);
15209 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15210 			tg3_flag_set(tp, ASPM_WORKAROUND);
15211 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15212 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15213 			tg3_flag_set(tp, WOL_ENABLE);
15214 			device_set_wakeup_enable(&tp->pdev->dev, true);
15215 		}
15216 		goto done;
15217 	}
15218 
15219 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15220 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15221 		u32 nic_cfg, led_cfg;
15222 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15223 		u32 nic_phy_id, ver, eeprom_phy_id;
15224 		int eeprom_phy_serdes = 0;
15225 
15226 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15227 		tp->nic_sram_data_cfg = nic_cfg;
15228 
15229 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15230 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15231 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15232 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15233 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15234 		    (ver > 0) && (ver < 0x100))
15235 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15236 
15237 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15238 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15239 
15240 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15241 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15242 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15243 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15244 
15245 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15246 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15247 			eeprom_phy_serdes = 1;
15248 
15249 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15250 		if (nic_phy_id != 0) {
15251 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15252 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15253 
15254 			eeprom_phy_id  = (id1 >> 16) << 10;
15255 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15256 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15257 		} else
15258 			eeprom_phy_id = 0;
15259 
15260 		tp->phy_id = eeprom_phy_id;
15261 		if (eeprom_phy_serdes) {
15262 			if (!tg3_flag(tp, 5705_PLUS))
15263 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15264 			else
15265 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15266 		}
15267 
15268 		if (tg3_flag(tp, 5750_PLUS))
15269 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15270 				    SHASTA_EXT_LED_MODE_MASK);
15271 		else
15272 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15273 
15274 		switch (led_cfg) {
15275 		default:
15276 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15277 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15278 			break;
15279 
15280 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15281 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15282 			break;
15283 
15284 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15285 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15286 
15287 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15288 			 * read, as seen with some older 5700/5701 bootcode.
15289 			 */
15290 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15291 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15292 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15293 
15294 			break;
15295 
15296 		case SHASTA_EXT_LED_SHARED:
15297 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15298 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15299 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15300 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15301 						 LED_CTRL_MODE_PHY_2);
15302 
15303 			if (tg3_flag(tp, 5717_PLUS) ||
15304 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15305 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15306 						LED_CTRL_BLINK_RATE_MASK;
15307 
15308 			break;
15309 
15310 		case SHASTA_EXT_LED_MAC:
15311 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15312 			break;
15313 
15314 		case SHASTA_EXT_LED_COMBO:
15315 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15316 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15317 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15318 						 LED_CTRL_MODE_PHY_2);
15319 			break;
15320 
15321 		}
15322 
15323 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15324 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15325 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15326 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15327 
15328 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15329 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15330 
15331 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15332 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15333 			if ((tp->pdev->subsystem_vendor ==
15334 			     PCI_VENDOR_ID_ARIMA) &&
15335 			    (tp->pdev->subsystem_device == 0x205a ||
15336 			     tp->pdev->subsystem_device == 0x2063))
15337 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15338 		} else {
15339 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15340 			tg3_flag_set(tp, IS_NIC);
15341 		}
15342 
15343 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15344 			tg3_flag_set(tp, ENABLE_ASF);
15345 			if (tg3_flag(tp, 5750_PLUS))
15346 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15347 		}
15348 
15349 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15350 		    tg3_flag(tp, 5750_PLUS))
15351 			tg3_flag_set(tp, ENABLE_APE);
15352 
15353 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15354 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15355 			tg3_flag_clear(tp, WOL_CAP);
15356 
15357 		if (tg3_flag(tp, WOL_CAP) &&
15358 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15359 			tg3_flag_set(tp, WOL_ENABLE);
15360 			device_set_wakeup_enable(&tp->pdev->dev, true);
15361 		}
15362 
15363 		if (cfg2 & (1 << 17))
15364 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15365 
15366 		/* SerDes signal pre-emphasis in register 0x590 is set by
15367 		 * the bootcode if bit 18 is set. */
15368 		if (cfg2 & (1 << 18))
15369 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15370 
15371 		if ((tg3_flag(tp, 57765_PLUS) ||
15372 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15373 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15374 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15375 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15376 
15377 		if (tg3_flag(tp, PCI_EXPRESS)) {
15378 			u32 cfg3;
15379 
15380 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15381 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15382 			    !tg3_flag(tp, 57765_PLUS) &&
15383 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15384 				tg3_flag_set(tp, ASPM_WORKAROUND);
15385 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15386 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15387 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15388 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15389 		}
15390 
15391 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15392 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15393 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15394 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15395 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15396 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15397 
15398 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15399 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15400 	}
15401 done:
15402 	if (tg3_flag(tp, WOL_CAP))
15403 		device_set_wakeup_enable(&tp->pdev->dev,
15404 					 tg3_flag(tp, WOL_ENABLE));
15405 	else
15406 		device_set_wakeup_capable(&tp->pdev->dev, false);
15407 }
15408 
15409 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15410 {
15411 	int i, err;
15412 	u32 val2, off = offset * 8;
15413 
15414 	err = tg3_nvram_lock(tp);
15415 	if (err)
15416 		return err;
15417 
15418 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15419 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15420 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15421 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15422 	udelay(10);
15423 
15424 	for (i = 0; i < 100; i++) {
15425 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15426 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15427 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15428 			break;
15429 		}
15430 		udelay(10);
15431 	}
15432 
15433 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15434 
15435 	tg3_nvram_unlock(tp);
15436 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15437 		return 0;
15438 
15439 	return -EBUSY;
15440 }
15441 
15442 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15443 {
15444 	int i;
15445 	u32 val;
15446 
15447 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15448 	tw32(OTP_CTRL, cmd);
15449 
15450 	/* Wait for up to 1 ms for command to execute. */
15451 	for (i = 0; i < 100; i++) {
15452 		val = tr32(OTP_STATUS);
15453 		if (val & OTP_STATUS_CMD_DONE)
15454 			break;
15455 		udelay(10);
15456 	}
15457 
15458 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15459 }
15460 
15461 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15462  * configuration is a 32-bit value that straddles the alignment boundary.
15463  * We do two 32-bit reads and then shift and merge the results.
15464  */
15465 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15466 {
15467 	u32 bhalf_otp, thalf_otp;
15468 
15469 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15470 
15471 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15472 		return 0;
15473 
15474 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15475 
15476 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15477 		return 0;
15478 
15479 	thalf_otp = tr32(OTP_READ_DATA);
15480 
15481 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15482 
15483 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15484 		return 0;
15485 
15486 	bhalf_otp = tr32(OTP_READ_DATA);
15487 
15488 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15489 }
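
/* Worked example of the merge above (values hypothetical): if the
 * reads at MAGIC1 and MAGIC2 return 0xAAAA1111 and 0x2222BBBB, the
 * straddled 32-bit gphy config is
 * ((0xAAAA1111 & 0xffff) << 16) | (0x2222BBBB >> 16) = 0x11112222.
 */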
15490 
15491 static void tg3_phy_init_link_config(struct tg3 *tp)
15492 {
15493 	u32 adv = ADVERTISED_Autoneg;
15494 
15495 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15496 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15497 			adv |= ADVERTISED_1000baseT_Half;
15498 		adv |= ADVERTISED_1000baseT_Full;
15499 	}
15500 
15501 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15502 		adv |= ADVERTISED_100baseT_Half |
15503 		       ADVERTISED_100baseT_Full |
15504 		       ADVERTISED_10baseT_Half |
15505 		       ADVERTISED_10baseT_Full |
15506 		       ADVERTISED_TP;
15507 	else
15508 		adv |= ADVERTISED_FIBRE;
15509 
15510 	tp->link_config.advertising = adv;
15511 	tp->link_config.speed = SPEED_UNKNOWN;
15512 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15513 	tp->link_config.autoneg = AUTONEG_ENABLE;
15514 	tp->link_config.active_speed = SPEED_UNKNOWN;
15515 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15516 
15517 	tp->old_link = -1;
15518 }
15519 
15520 static int tg3_phy_probe(struct tg3 *tp)
15521 {
15522 	u32 hw_phy_id_1, hw_phy_id_2;
15523 	u32 hw_phy_id, hw_phy_id_masked;
15524 	int err;
15525 
15526 	/* flow control autonegotiation is default behavior */
15527 	tg3_flag_set(tp, PAUSE_AUTONEG);
15528 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15529 
15530 	if (tg3_flag(tp, ENABLE_APE)) {
15531 		switch (tp->pci_fn) {
15532 		case 0:
15533 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15534 			break;
15535 		case 1:
15536 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15537 			break;
15538 		case 2:
15539 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15540 			break;
15541 		case 3:
15542 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15543 			break;
15544 		}
15545 	}
15546 
15547 	if (!tg3_flag(tp, ENABLE_ASF) &&
15548 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15549 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15550 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15551 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15552 
15553 	if (tg3_flag(tp, USE_PHYLIB))
15554 		return tg3_phy_init(tp);
15555 
15556 	/* Reading the PHY ID register can conflict with ASF
15557 	 * firmware access to the PHY hardware.
15558 	 */
15559 	err = 0;
15560 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15561 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15562 	} else {
15563 		/* Now read the physical PHY_ID from the chip and verify
15564 		 * that it is sane.  If it doesn't look good, we fall back
15565 		 * to the PHY_ID found in the EEPROM area, and failing
15566 		 * that, to the hard-coded subsystem-ID table.
15567 		 */
15568 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15569 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15570 
15571 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15572 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15573 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15574 
15575 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15576 	}
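
	/* The packing above follows the MII split of the ID registers:
	 * PHYSID1 carries OUI bits 3..18 and PHYSID2 carries OUI bits
	 * 19..24 plus the model and revision fields.  tg3 repacks them
	 * into its private TG3_PHY_ID_* layout so that hw_phy_id_masked
	 * can be compared against the known-PHY table.
	 */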
15577 
15578 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15579 		tp->phy_id = hw_phy_id;
15580 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15581 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15582 		else
15583 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15584 	} else {
15585 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15586 			/* Do nothing, phy ID already set up in
15587 			 * tg3_get_eeprom_hw_cfg().
15588 			 */
15589 		} else {
15590 			struct subsys_tbl_ent *p;
15591 
15592 			/* No eeprom signature?  Try the hardcoded
15593 			 * subsys device table.
15594 			 */
15595 			p = tg3_lookup_by_subsys(tp);
15596 			if (p) {
15597 				tp->phy_id = p->phy_id;
15598 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15599 				/* So far we have seen the IDs 0xbc050cd0,
15600 				 * 0xbc050f80 and 0xbc050c30 on devices
15601 				 * connected to a BCM4785, and there are
15602 				 * probably more.  For now, just assume that
15603 				 * the PHY is supported whenever it is
15604 				 * connected to an SSB core.
15605 				 */
15606 				return -ENODEV;
15607 			}
15608 
15609 			if (!tp->phy_id ||
15610 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15611 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15612 		}
15613 	}
15614 
15615 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15616 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15617 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15618 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15619 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15620 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15621 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15622 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15623 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15624 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15625 
15626 		tp->eee.supported = SUPPORTED_100baseT_Full |
15627 				    SUPPORTED_1000baseT_Full;
15628 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15629 				     ADVERTISED_1000baseT_Full;
15630 		tp->eee.eee_enabled = 1;
15631 		tp->eee.tx_lpi_enabled = 1;
15632 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15633 	}
15634 
15635 	tg3_phy_init_link_config(tp);
15636 
15637 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15638 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15639 	    !tg3_flag(tp, ENABLE_APE) &&
15640 	    !tg3_flag(tp, ENABLE_ASF)) {
15641 		u32 bmsr, dummy;
15642 
15643 		tg3_readphy(tp, MII_BMSR, &bmsr);
15644 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15645 		    (bmsr & BMSR_LSTATUS))
15646 			goto skip_phy_reset;
15647 
15648 		err = tg3_phy_reset(tp);
15649 		if (err)
15650 			return err;
15651 
15652 		tg3_phy_set_wirespeed(tp);
15653 
15654 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15655 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15656 					    tp->link_config.flowctrl);
15657 
15658 			tg3_writephy(tp, MII_BMCR,
15659 				     BMCR_ANENABLE | BMCR_ANRESTART);
15660 		}
15661 	}
15662 
15663 skip_phy_reset:
15664 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15665 		err = tg3_init_5401phy_dsp(tp);
15666 		if (err)
15667 			return err;
15668 
15669 		err = tg3_init_5401phy_dsp(tp);
15670 	}
15671 
15672 	return err;
15673 }
15674 
15675 static void tg3_read_vpd(struct tg3 *tp)
15676 {
15677 	u8 *vpd_data;
15678 	unsigned int block_end, rosize, len;
15679 	u32 vpdlen;
15680 	int j, i = 0;
15681 
15682 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15683 	if (!vpd_data)
15684 		goto out_no_vpd;
15685 
15686 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15687 	if (i < 0)
15688 		goto out_not_found;
15689 
15690 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15691 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15692 	i += PCI_VPD_LRDT_TAG_SIZE;
15693 
15694 	if (block_end > vpdlen)
15695 		goto out_not_found;
15696 
15697 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15698 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15699 	if (j > 0) {
15700 		len = pci_vpd_info_field_size(&vpd_data[j]);
15701 
15702 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15703 		if (j + len > block_end || len != 4 ||
15704 		    memcmp(&vpd_data[j], "1028", 4))
15705 			goto partno;
15706 
15707 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15708 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15709 		if (j < 0)
15710 			goto partno;
15711 
15712 		len = pci_vpd_info_field_size(&vpd_data[j]);
15713 
15714 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15715 		if (j + len > block_end)
15716 			goto partno;
15717 
15718 		if (len >= sizeof(tp->fw_ver))
15719 			len = sizeof(tp->fw_ver) - 1;
15720 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15721 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15722 			 &vpd_data[j]);
15723 	}
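
	/* PCI VPD background for the walk above: the read-only section
	 * is a list of keyword/length/value triples.  An "MN"
	 * (manufacturer ID) field equal to the ASCII string "1028"
	 * identifies a Dell board (Dell's PCI vendor ID is 0x1028), in
	 * which case the vendor-specific "V0" field carries the
	 * bootcode version copied into fw_ver above.
	 */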
15724 
15725 partno:
15726 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15727 				      PCI_VPD_RO_KEYWORD_PARTNO);
15728 	if (i < 0)
15729 		goto out_not_found;
15730 
15731 	len = pci_vpd_info_field_size(&vpd_data[i]);
15732 
15733 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15734 	if (len > TG3_BPN_SIZE ||
15735 	    (len + i) > vpdlen)
15736 		goto out_not_found;
15737 
15738 	memcpy(tp->board_part_number, &vpd_data[i], len);
15739 
15740 out_not_found:
15741 	kfree(vpd_data);
15742 	if (tp->board_part_number[0])
15743 		return;
15744 
15745 out_no_vpd:
15746 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15747 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15748 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15749 			strcpy(tp->board_part_number, "BCM5717");
15750 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15751 			strcpy(tp->board_part_number, "BCM5718");
15752 		else
15753 			goto nomatch;
15754 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15755 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15756 			strcpy(tp->board_part_number, "BCM57780");
15757 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15758 			strcpy(tp->board_part_number, "BCM57760");
15759 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15760 			strcpy(tp->board_part_number, "BCM57790");
15761 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15762 			strcpy(tp->board_part_number, "BCM57788");
15763 		else
15764 			goto nomatch;
15765 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15766 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15767 			strcpy(tp->board_part_number, "BCM57761");
15768 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15769 			strcpy(tp->board_part_number, "BCM57765");
15770 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15771 			strcpy(tp->board_part_number, "BCM57781");
15772 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15773 			strcpy(tp->board_part_number, "BCM57785");
15774 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15775 			strcpy(tp->board_part_number, "BCM57791");
15776 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15777 			strcpy(tp->board_part_number, "BCM57795");
15778 		else
15779 			goto nomatch;
15780 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15781 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15782 			strcpy(tp->board_part_number, "BCM57762");
15783 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15784 			strcpy(tp->board_part_number, "BCM57766");
15785 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15786 			strcpy(tp->board_part_number, "BCM57782");
15787 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15788 			strcpy(tp->board_part_number, "BCM57786");
15789 		else
15790 			goto nomatch;
15791 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15792 		strcpy(tp->board_part_number, "BCM95906");
15793 	} else {
15794 nomatch:
15795 		strcpy(tp->board_part_number, "none");
15796 	}
15797 }
15798 
15799 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15800 {
15801 	u32 val;
15802 
15803 	if (tg3_nvram_read(tp, offset, &val) ||
15804 	    (val & 0xfc000000) != 0x0c000000 ||
15805 	    tg3_nvram_read(tp, offset + 4, &val) ||
15806 	    val != 0)
15807 		return 0;
15808 
15809 	return 1;
15810 }
15811 
15812 static void tg3_read_bc_ver(struct tg3 *tp)
15813 {
15814 	u32 val, offset, start, ver_offset;
15815 	int i, dst_off;
15816 	bool newver = false;
15817 
15818 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15819 	    tg3_nvram_read(tp, 0x4, &start))
15820 		return;
15821 
15822 	offset = tg3_nvram_logical_addr(tp, offset);
15823 
15824 	if (tg3_nvram_read(tp, offset, &val))
15825 		return;
15826 
15827 	if ((val & 0xfc000000) == 0x0c000000) {
15828 		if (tg3_nvram_read(tp, offset + 4, &val))
15829 			return;
15830 
15831 		if (val == 0)
15832 			newver = true;
15833 	}
15834 
15835 	dst_off = strlen(tp->fw_ver);
15836 
15837 	if (newver) {
15838 		if (TG3_VER_SIZE - dst_off < 16 ||
15839 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15840 			return;
15841 
15842 		offset = offset + ver_offset - start;
15843 		for (i = 0; i < 16; i += 4) {
15844 			__be32 v;
15845 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15846 				return;
15847 
15848 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15849 		}
15850 	} else {
15851 		u32 major, minor;
15852 
15853 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15854 			return;
15855 
15856 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15857 			TG3_NVM_BCVER_MAJSFT;
15858 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15859 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15860 			 "v%d.%02d", major, minor);
15861 	}
15862 }
15863 
15864 static void tg3_read_hwsb_ver(struct tg3 *tp)
15865 {
15866 	u32 val, major, minor;
15867 
15868 	/* Use native endian representation */
15869 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15870 		return;
15871 
15872 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15873 		TG3_NVM_HWSB_CFG1_MAJSFT;
15874 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15875 		TG3_NVM_HWSB_CFG1_MINSFT;
15876 
15877 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15878 }
15879 
15880 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15881 {
15882 	u32 offset, major, minor, build;
15883 
15884 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15885 
15886 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15887 		return;
15888 
15889 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15890 	case TG3_EEPROM_SB_REVISION_0:
15891 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15892 		break;
15893 	case TG3_EEPROM_SB_REVISION_2:
15894 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15895 		break;
15896 	case TG3_EEPROM_SB_REVISION_3:
15897 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15898 		break;
15899 	case TG3_EEPROM_SB_REVISION_4:
15900 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15901 		break;
15902 	case TG3_EEPROM_SB_REVISION_5:
15903 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15904 		break;
15905 	case TG3_EEPROM_SB_REVISION_6:
15906 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15907 		break;
15908 	default:
15909 		return;
15910 	}
15911 
15912 	if (tg3_nvram_read(tp, offset, &val))
15913 		return;
15914 
15915 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15916 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15917 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15918 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15919 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15920 
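	/* The minor number is printed with two digits and the build is
	 * appended as a single letter ('a' for build 1 through 'z' for
	 * build 26), so larger values cannot be represented.
	 */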
15921 	if (minor > 99 || build > 26)
15922 		return;
15923 
15924 	offset = strlen(tp->fw_ver);
15925 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15926 		 " v%d.%02d", major, minor);
15927 
15928 	if (build > 0) {
15929 		offset = strlen(tp->fw_ver);
15930 		if (offset < TG3_VER_SIZE - 1)
15931 			tp->fw_ver[offset] = 'a' + build - 1;
15932 	}
15933 }
15934 
15935 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15936 {
15937 	u32 val, offset, start;
15938 	int i, vlen;
15939 
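	/* Walk the NVRAM directory until we find the ASF-init entry;
	 * each directory entry encodes its type in the upper bits of
	 * the first word.
	 */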
15940 	for (offset = TG3_NVM_DIR_START;
15941 	     offset < TG3_NVM_DIR_END;
15942 	     offset += TG3_NVM_DIRENT_SIZE) {
15943 		if (tg3_nvram_read(tp, offset, &val))
15944 			return;
15945 
15946 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15947 			break;
15948 	}
15949 
15950 	if (offset == TG3_NVM_DIR_END)
15951 		return;
15952 
15953 	if (!tg3_flag(tp, 5705_PLUS))
15954 		start = 0x08000000;
15955 	else if (tg3_nvram_read(tp, offset - 4, &start))
15956 		return;
15957 
15958 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15959 	    !tg3_fw_img_is_valid(tp, offset) ||
15960 	    tg3_nvram_read(tp, offset + 8, &val))
15961 		return;
15962 
15963 	offset += val - start;
15964 
15965 	vlen = strlen(tp->fw_ver);
15966 
15967 	tp->fw_ver[vlen++] = ',';
15968 	tp->fw_ver[vlen++] = ' ';
15969 
15970 	for (i = 0; i < 4; i++) {
15971 		__be32 v;
15972 		if (tg3_nvram_read_be32(tp, offset, &v))
15973 			return;
15974 
15975 		offset += sizeof(v);
15976 
15977 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15978 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15979 			break;
15980 		}
15981 
15982 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15983 		vlen += sizeof(v);
15984 	}
15985 }
15986 
15987 static void tg3_probe_ncsi(struct tg3 *tp)
15988 {
15989 	u32 apedata;
15990 
15991 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15992 	if (apedata != APE_SEG_SIG_MAGIC)
15993 		return;
15994 
15995 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15996 	if (!(apedata & APE_FW_STATUS_READY))
15997 		return;
15998 
15999 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16000 		tg3_flag_set(tp, APE_HAS_NCSI);
16001 }
16002 
16003 static void tg3_read_dash_ver(struct tg3 *tp)
16004 {
16005 	int vlen;
16006 	u32 apedata;
16007 	char *fwtype;
16008 
16009 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16010 
16011 	if (tg3_flag(tp, APE_HAS_NCSI))
16012 		fwtype = "NCSI";
16013 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16014 		fwtype = "SMASH";
16015 	else
16016 		fwtype = "DASH";
16017 
16018 	vlen = strlen(tp->fw_ver);
16019 
16020 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16021 		 fwtype,
16022 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16023 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16024 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16025 		 (apedata & APE_FW_VERSION_BLDMSK));
16026 }
16027 
16028 static void tg3_read_otp_ver(struct tg3 *tp)
16029 {
16030 	u32 val, val2;
16031 
16032 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
16033 		return;
16034 
16035 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16036 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16037 	    TG3_OTP_MAGIC0_VALID(val)) {
16038 		u64 val64 = (u64) val << 32 | val2;
16039 		u32 ver = 0;
16040 		int i, vlen;
16041 
16042 		for (i = 0; i < 7; i++) {
16043 			if ((val64 & 0xff) == 0)
16044 				break;
16045 			ver = val64 & 0xff;
16046 			val64 >>= 8;
16047 		}
16048 		vlen = strlen(tp->fw_ver);
16049 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16050 	}
16051 }
16052 
16053 static void tg3_read_fw_ver(struct tg3 *tp)
16054 {
16055 	u32 val;
16056 	bool vpd_vers = false;
16057 
16058 	if (tp->fw_ver[0] != 0)
16059 		vpd_vers = true;
16060 
16061 	if (tg3_flag(tp, NO_NVRAM)) {
16062 		strcat(tp->fw_ver, "sb");
16063 		tg3_read_otp_ver(tp);
16064 		return;
16065 	}
16066 
16067 	if (tg3_nvram_read(tp, 0, &val))
16068 		return;
16069 
16070 	if (val == TG3_EEPROM_MAGIC)
16071 		tg3_read_bc_ver(tp);
16072 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16073 		tg3_read_sb_ver(tp, val);
16074 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16075 		tg3_read_hwsb_ver(tp);
16076 
16077 	if (tg3_flag(tp, ENABLE_ASF)) {
16078 		if (tg3_flag(tp, ENABLE_APE)) {
16079 			tg3_probe_ncsi(tp);
16080 			if (!vpd_vers)
16081 				tg3_read_dash_ver(tp);
16082 		} else if (!vpd_vers) {
16083 			tg3_read_mgmtfw_ver(tp);
16084 		}
16085 	}
16086 
16087 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16088 }
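
/* The helpers above build tp->fw_ver incrementally, so a fully
 * populated string might look like, for example (illustrative only):
 *
 *	"sb v1.02, NCSI v1.1.2.0"
 *
 * i.e. the bootcode/VPD version first, then management firmware
 * details when ASF or APE firmware is present.
 */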
16089 
16090 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16091 {
16092 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16093 		return TG3_RX_RET_MAX_SIZE_5717;
16094 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16095 		return TG3_RX_RET_MAX_SIZE_5700;
16096 	else
16097 		return TG3_RX_RET_MAX_SIZE_5705;
16098 }
16099 
16100 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16101 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16102 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16103 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16104 	{ },
16105 };
16106 
16107 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16108 {
16109 	struct pci_dev *peer;
16110 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16111 
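	/* devfn & ~7 masks off the three PCI function bits, so
	 * devnr | func below visits every function (0-7) in this slot
	 * while looking for the other port of a dual-port device.
	 */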
16112 	for (func = 0; func < 8; func++) {
16113 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16114 		if (peer && peer != tp->pdev)
16115 			break;
16116 		pci_dev_put(peer);
16117 	}
16118 	/* 5704 can be configured in single-port mode; set peer to
16119 	 * tp->pdev in that case.
16120 	 */
16121 	if (!peer) {
16122 		peer = tp->pdev;
16123 		return peer;
16124 	}
16125 
16126 	/*
16127 	 * We don't need to keep the refcount elevated; there's no way
16128 	 * to remove one half of this device without removing the other.
16129 	 */
16130 	pci_dev_put(peer);
16131 
16132 	return peer;
16133 }
16134 
16135 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16136 {
16137 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
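	/* The chip revision ID packs the ASIC family into its upper
	 * bits and the silicon stepping into the lower ones; the
	 * tg3_asic_rev(), tg3_chip_rev() and tg3_chip_rev_id()
	 * accessors used throughout decode it.
	 */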
16138 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16139 		u32 reg;
16140 
16141 		/* All devices that use the alternate
16142 		 * ASIC REV location have a CPMU.
16143 		 */
16144 		tg3_flag_set(tp, CPMU_PRESENT);
16145 
16146 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16147 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16148 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16149 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16150 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16151 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16152 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16153 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16154 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16155 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16156 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16157 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16158 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16159 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16160 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16161 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16162 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16163 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16164 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16165 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16166 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16167 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16168 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16169 		else
16170 			reg = TG3PCI_PRODID_ASICREV;
16171 
16172 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16173 	}
16174 
16175 	/* Wrong chip ID in 5752 A0. This code can be removed later
16176 	 * as A0 is not in production.
16177 	 */
16178 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16179 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16180 
16181 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16182 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16183 
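	/* The *_PLUS flags below form a superset chain: 5717_PLUS and
	 * 57765_CLASS feed 57765_PLUS, which feeds 5755_PLUS, which
	 * feeds 5750_PLUS, which feeds 5705_PLUS.  Code elsewhere
	 * tests whichever flag covers the broadest set it cares about.
	 */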
16184 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16185 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16186 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16187 		tg3_flag_set(tp, 5717_PLUS);
16188 
16189 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16190 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16191 		tg3_flag_set(tp, 57765_CLASS);
16192 
16193 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16194 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16195 		tg3_flag_set(tp, 57765_PLUS);
16196 
16197 	/* Intentionally exclude ASIC_REV_5906 */
16198 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16199 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16200 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16201 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16202 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16203 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16204 	    tg3_flag(tp, 57765_PLUS))
16205 		tg3_flag_set(tp, 5755_PLUS);
16206 
16207 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16208 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16209 		tg3_flag_set(tp, 5780_CLASS);
16210 
16211 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16212 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16213 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16214 	    tg3_flag(tp, 5755_PLUS) ||
16215 	    tg3_flag(tp, 5780_CLASS))
16216 		tg3_flag_set(tp, 5750_PLUS);
16217 
16218 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16219 	    tg3_flag(tp, 5750_PLUS))
16220 		tg3_flag_set(tp, 5705_PLUS);
16221 }
16222 
16223 static bool tg3_10_100_only_device(struct tg3 *tp,
16224 				   const struct pci_device_id *ent)
16225 {
16226 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16227 
16228 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16229 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16230 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16231 		return true;
16232 
16233 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16234 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16235 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16236 				return true;
16237 		} else {
16238 			return true;
16239 		}
16240 	}
16241 
16242 	return false;
16243 }
16244 
16245 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16246 {
16247 	u32 misc_ctrl_reg;
16248 	u32 pci_state_reg, grc_misc_cfg;
16249 	u32 val;
16250 	u16 pci_cmd;
16251 	int err;
16252 
16253 	/* Force memory write invalidate off.  If we leave it on,
16254 	 * then on 5700_BX chips we have to enable a workaround.
16255 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16256 	 * to match the cacheline size.  The Broadcom driver has this
16257 	 * workaround but turns MWI off all the time and so never uses
16258 	 * it.  This seems to suggest that the workaround is insufficient.
16259 	 */
16260 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16261 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16262 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16263 
16264 	/* Important! -- Make sure register accesses are byteswapped
16265 	 * correctly.  Also, for those chips that require it, make
16266 	 * sure that indirect register accesses are enabled before
16267 	 * the first operation.
16268 	 */
16269 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16270 			      &misc_ctrl_reg);
16271 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16272 			       MISC_HOST_CTRL_CHIPREV);
16273 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16274 			       tp->misc_host_ctrl);
16275 
16276 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16277 
16278 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16279 	 * we need to disable memory and use configuration cycles
16280 	 * only to access all registers. The 5702/03 chips
16281 	 * can mistakenly decode the special cycles from the
16282 	 * ICH chipsets as memory write cycles, causing corruption
16283 	 * of register and memory space. Only certain ICH bridges
16284 	 * will drive special cycles with non-zero data during the
16285 	 * address phase which can fall within the 5703's address
16286 	 * range. This is not an ICH bug as the PCI spec allows
16287 	 * non-zero address during special cycles. However, only
16288 	 * these ICH bridges are known to drive non-zero addresses
16289 	 * during special cycles.
16290 	 *
16291 	 * Since special cycles do not cross PCI bridges, we only
16292 	 * enable this workaround if the 5703 is on the secondary
16293 	 * bus of these ICH bridges.
16294 	 */
16295 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16296 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16297 		static struct tg3_dev_id {
16298 			u32	vendor;
16299 			u32	device;
16300 			u32	rev;
16301 		} ich_chipsets[] = {
16302 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16303 			  PCI_ANY_ID },
16304 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16305 			  PCI_ANY_ID },
16306 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16307 			  0xa },
16308 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16309 			  PCI_ANY_ID },
16310 			{ },
16311 		};
16312 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16313 		struct pci_dev *bridge = NULL;
16314 
16315 		while (pci_id->vendor != 0) {
16316 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16317 						bridge);
16318 			if (!bridge) {
16319 				pci_id++;
16320 				continue;
16321 			}
16322 			if (pci_id->rev != PCI_ANY_ID) {
16323 				if (bridge->revision > pci_id->rev)
16324 					continue;
16325 			}
16326 			if (bridge->subordinate &&
16327 			    (bridge->subordinate->number ==
16328 			     tp->pdev->bus->number)) {
16329 				tg3_flag_set(tp, ICH_WORKAROUND);
16330 				pci_dev_put(bridge);
16331 				break;
16332 			}
16333 		}
16334 	}
16335 
16336 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16337 		static struct tg3_dev_id {
16338 			u32	vendor;
16339 			u32	device;
16340 		} bridge_chipsets[] = {
16341 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16342 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16343 			{ },
16344 		};
16345 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16346 		struct pci_dev *bridge = NULL;
16347 
16348 		while (pci_id->vendor != 0) {
16349 			bridge = pci_get_device(pci_id->vendor,
16350 						pci_id->device,
16351 						bridge);
16352 			if (!bridge) {
16353 				pci_id++;
16354 				continue;
16355 			}
16356 			if (bridge->subordinate &&
16357 			    (bridge->subordinate->number <=
16358 			     tp->pdev->bus->number) &&
16359 			    (bridge->subordinate->busn_res.end >=
16360 			     tp->pdev->bus->number)) {
16361 				tg3_flag_set(tp, 5701_DMA_BUG);
16362 				pci_dev_put(bridge);
16363 				break;
16364 			}
16365 		}
16366 	}
16367 
16368 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16369 	 * DMA addresses > 40-bit. This bridge may have additional
16370 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16371 	 * Any tg3 device found behind the bridge will also need the 40-bit
16372 	 * DMA workaround.
16373 	 */
16374 	if (tg3_flag(tp, 5780_CLASS)) {
16375 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16376 		tp->msi_cap = tp->pdev->msi_cap;
16377 	} else {
16378 		struct pci_dev *bridge = NULL;
16379 
16380 		do {
16381 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16382 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16383 						bridge);
16384 			if (bridge && bridge->subordinate &&
16385 			    (bridge->subordinate->number <=
16386 			     tp->pdev->bus->number) &&
16387 			    (bridge->subordinate->busn_res.end >=
16388 			     tp->pdev->bus->number)) {
16389 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16390 				pci_dev_put(bridge);
16391 				break;
16392 			}
16393 		} while (bridge);
16394 	}
16395 
16396 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16397 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16398 		tp->pdev_peer = tg3_find_peer(tp);
16399 
16400 	/* Determine TSO capabilities */
16401 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16402 		; /* Do nothing. HW bug. */
16403 	else if (tg3_flag(tp, 57765_PLUS))
16404 		tg3_flag_set(tp, HW_TSO_3);
16405 	else if (tg3_flag(tp, 5755_PLUS) ||
16406 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16407 		tg3_flag_set(tp, HW_TSO_2);
16408 	else if (tg3_flag(tp, 5750_PLUS)) {
16409 		tg3_flag_set(tp, HW_TSO_1);
16410 		tg3_flag_set(tp, TSO_BUG);
16411 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16412 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16413 			tg3_flag_clear(tp, TSO_BUG);
16414 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16415 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16416 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16417 		tg3_flag_set(tp, FW_TSO);
16418 		tg3_flag_set(tp, TSO_BUG);
16419 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16420 			tp->fw_needed = FIRMWARE_TG3TSO5;
16421 		else
16422 			tp->fw_needed = FIRMWARE_TG3TSO;
16423 	}
16424 
16425 	/* Selectively allow TSO based on operating conditions */
16426 	if (tg3_flag(tp, HW_TSO_1) ||
16427 	    tg3_flag(tp, HW_TSO_2) ||
16428 	    tg3_flag(tp, HW_TSO_3) ||
16429 	    tg3_flag(tp, FW_TSO)) {
16430 		/* For firmware TSO, assume ASF is disabled.
16431 		 * We'll disable TSO later if we discover ASF
16432 		 * is enabled in tg3_get_eeprom_hw_cfg().
16433 		 */
16434 		tg3_flag_set(tp, TSO_CAPABLE);
16435 	} else {
16436 		tg3_flag_clear(tp, TSO_CAPABLE);
16437 		tg3_flag_clear(tp, TSO_BUG);
16438 		tp->fw_needed = NULL;
16439 	}
16440 
16441 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16442 		tp->fw_needed = FIRMWARE_TG3;
16443 
16444 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16445 		tp->fw_needed = FIRMWARE_TG357766;
16446 
16447 	tp->irq_max = 1;
16448 
16449 	if (tg3_flag(tp, 5750_PLUS)) {
16450 		tg3_flag_set(tp, SUPPORT_MSI);
16451 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16452 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16453 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16454 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16455 		     tp->pdev_peer == tp->pdev))
16456 			tg3_flag_clear(tp, SUPPORT_MSI);
16457 
16458 		if (tg3_flag(tp, 5755_PLUS) ||
16459 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16460 			tg3_flag_set(tp, 1SHOT_MSI);
16461 		}
16462 
16463 		if (tg3_flag(tp, 57765_PLUS)) {
16464 			tg3_flag_set(tp, SUPPORT_MSIX);
16465 			tp->irq_max = TG3_IRQ_MAX_VECS;
16466 		}
16467 	}
16468 
16469 	tp->txq_max = 1;
16470 	tp->rxq_max = 1;
16471 	if (tp->irq_max > 1) {
16472 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16473 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16474 
16475 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16476 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16477 			tp->txq_max = tp->irq_max - 1;
16478 	}
16479 
16480 	if (tg3_flag(tp, 5755_PLUS) ||
16481 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16482 		tg3_flag_set(tp, SHORT_DMA_BUG);
16483 
16484 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16485 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16486 
16487 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16488 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16489 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16490 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16491 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16492 
16493 	if (tg3_flag(tp, 57765_PLUS) &&
16494 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16495 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16496 
16497 	if (!tg3_flag(tp, 5705_PLUS) ||
16498 	    tg3_flag(tp, 5780_CLASS) ||
16499 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16500 		tg3_flag_set(tp, JUMBO_CAPABLE);
16501 
16502 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16503 			      &pci_state_reg);
16504 
16505 	if (pci_is_pcie(tp->pdev)) {
16506 		u16 lnkctl;
16507 
16508 		tg3_flag_set(tp, PCI_EXPRESS);
16509 
16510 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16511 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16512 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16513 				tg3_flag_clear(tp, HW_TSO_2);
16514 				tg3_flag_clear(tp, TSO_CAPABLE);
16515 			}
16516 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16517 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16518 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16519 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16520 				tg3_flag_set(tp, CLKREQ_BUG);
16521 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16522 			tg3_flag_set(tp, L1PLLPD_EN);
16523 		}
16524 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16525 		/* BCM5785 devices are effectively PCIe devices, and should
16526 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16527 		 * section.
16528 		 */
16529 		tg3_flag_set(tp, PCI_EXPRESS);
16530 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16531 		   tg3_flag(tp, 5780_CLASS)) {
16532 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16533 		if (!tp->pcix_cap) {
16534 			dev_err(&tp->pdev->dev,
16535 				"Cannot find PCI-X capability, aborting\n");
16536 			return -EIO;
16537 		}
16538 
16539 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16540 			tg3_flag_set(tp, PCIX_MODE);
16541 	}
16542 
16543 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16544 	 * reordering to the mailbox registers done by the host
16545 	 * controller can cause major troubles.  We read back from
16546 	 * every mailbox register write to force the writes to be
16547 	 * posted to the chip in order.
16548 	 */
16549 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16550 	    !tg3_flag(tp, PCI_EXPRESS))
16551 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
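
	/* A minimal sketch of the flush-on-write pattern this flag
	 * selects (see tg3_write_flush_reg32(), wired up below):
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);
	 *
	 * The read back un-posts the write before the CPU moves on,
	 * defeating the chipset's reordering of mailbox writes.
	 */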
16552 
16553 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16554 			     &tp->pci_cacheline_sz);
16555 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16556 			     &tp->pci_lat_timer);
16557 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16558 	    tp->pci_lat_timer < 64) {
16559 		tp->pci_lat_timer = 64;
16560 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16561 				      tp->pci_lat_timer);
16562 	}
16563 
16564 	/* Important! -- It is critical that the PCI-X hw workaround
16565 	 * situation is decided before the first MMIO register access.
16566 	 */
16567 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16568 		/* 5700 BX chips need to have their TX producer index
16569 		 * mailboxes written twice to workaround a bug.
16570 		 * mailboxes written twice to work around a bug.
16571 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16572 
16573 		/* If we are in PCI-X mode, enable register write workaround.
16574 		 *
16575 		 * The workaround is to use indirect register accesses
16576 		 * for all chip writes not to mailbox registers.
16577 		 */
16578 		if (tg3_flag(tp, PCIX_MODE)) {
16579 			u32 pm_reg;
16580 
16581 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16582 
16583 			/* The chip can have its power management PCI config
16584 			 * space registers clobbered due to this bug.
16585 			 * So explicitly force the chip into D0 here.
16586 			 */
16587 			pci_read_config_dword(tp->pdev,
16588 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16589 					      &pm_reg);
16590 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16591 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16592 			pci_write_config_dword(tp->pdev,
16593 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16594 					       pm_reg);
16595 
16596 			/* Also, force SERR#/PERR# in PCI command. */
16597 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16598 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16599 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16600 		}
16601 	}
16602 
16603 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16604 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16605 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16606 		tg3_flag_set(tp, PCI_32BIT);
16607 
16608 	/* Chip-specific fixup from Broadcom driver */
16609 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16610 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16611 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16612 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16613 	}
16614 
16615 	/* Default fast path register access methods */
16616 	tp->read32 = tg3_read32;
16617 	tp->write32 = tg3_write32;
16618 	tp->read32_mbox = tg3_read32;
16619 	tp->write32_mbox = tg3_write32;
16620 	tp->write32_tx_mbox = tg3_write32;
16621 	tp->write32_rx_mbox = tg3_write32;
16622 
16623 	/* Various workaround register access methods */
16624 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16625 		tp->write32 = tg3_write_indirect_reg32;
16626 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16627 		 (tg3_flag(tp, PCI_EXPRESS) &&
16628 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16629 		/*
16630 		 * Back to back register writes can cause problems on these
16631 		 * chips, the workaround is to read back all reg writes
16632 		 * except those to mailbox regs.
16633 		 *
16634 		 * See tg3_write_indirect_reg32().
16635 		 */
16636 		tp->write32 = tg3_write_flush_reg32;
16637 	}
16638 
16639 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16640 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16641 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16642 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16643 	}
16644 
16645 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16646 		tp->read32 = tg3_read_indirect_reg32;
16647 		tp->write32 = tg3_write_indirect_reg32;
16648 		tp->read32_mbox = tg3_read_indirect_mbox;
16649 		tp->write32_mbox = tg3_write_indirect_mbox;
16650 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16651 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16652 
16653 		iounmap(tp->regs);
16654 		tp->regs = NULL;
16655 
16656 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16657 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16658 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16659 	}
16660 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16661 		tp->read32_mbox = tg3_read32_mbox_5906;
16662 		tp->write32_mbox = tg3_write32_mbox_5906;
16663 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16664 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16665 	}
16666 
16667 	if (tp->write32 == tg3_write_indirect_reg32 ||
16668 	    (tg3_flag(tp, PCIX_MODE) &&
16669 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16670 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16671 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16672 
16673 	/* The memory arbiter has to be enabled in order for SRAM accesses
16674 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16675 	 * sure it is enabled, but other entities such as system netboot
16676 	 * code might disable it.
16677 	 */
16678 	val = tr32(MEMARB_MODE);
16679 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16680 
16681 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16682 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16683 	    tg3_flag(tp, 5780_CLASS)) {
16684 		if (tg3_flag(tp, PCIX_MODE)) {
16685 			pci_read_config_dword(tp->pdev,
16686 					      tp->pcix_cap + PCI_X_STATUS,
16687 					      &val);
16688 			tp->pci_fn = val & 0x7;
16689 		}
16690 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16691 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16692 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16693 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16694 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16695 			val = tr32(TG3_CPMU_STATUS);
16696 
16697 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16698 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16699 		else
16700 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16701 				     TG3_CPMU_STATUS_FSHFT_5719;
16702 	}
16703 
16704 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16705 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16706 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16707 	}
16708 
16709 	/* Get eeprom hw config before calling tg3_set_power_state().
16710 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16711 	 * determined before calling tg3_set_power_state() so that
16712 	 * we know whether or not to switch out of Vaux power.
16713 	 * When the flag is set, it means that GPIO1 is used for eeprom
16714 	 * write protect and also implies that it is a LOM where GPIOs
16715 	 * are not used to switch power.
16716 	 */
16717 	tg3_get_eeprom_hw_cfg(tp);
16718 
16719 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16720 		tg3_flag_clear(tp, TSO_CAPABLE);
16721 		tg3_flag_clear(tp, TSO_BUG);
16722 		tp->fw_needed = NULL;
16723 	}
16724 
16725 	if (tg3_flag(tp, ENABLE_APE)) {
16726 		/* Allow reads and writes to the
16727 		 * APE register and memory space.
16728 		 */
16729 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16730 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16731 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16732 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16733 				       pci_state_reg);
16734 
16735 		tg3_ape_lock_init(tp);
16736 		tp->ape_hb_interval =
16737 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16738 	}
16739 
16740 	/* Set up tp->grc_local_ctrl before calling
16741 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16742 	 * will bring 5700's external PHY out of reset.
16743 	 * It is also used as eeprom write protect on LOMs.
16744 	 */
16745 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16746 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16747 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16748 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16749 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16750 	/* Unused GPIO3 must be driven as output on 5752 because there
16751 	 * are no pull-up resistors on unused GPIO pins.
16752 	 */
16753 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16754 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16755 
16756 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16757 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16758 	    tg3_flag(tp, 57765_CLASS))
16759 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16760 
16761 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16762 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16763 		/* Turn off the debug UART. */
16764 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16765 		if (tg3_flag(tp, IS_NIC))
16766 			/* Keep VMain power. */
16767 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16768 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16769 	}
16770 
16771 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16772 		tp->grc_local_ctrl |=
16773 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16774 
16775 	/* Switch out of Vaux if it is a NIC */
16776 	tg3_pwrsrc_switch_to_vmain(tp);
16777 
16778 	/* Derive initial jumbo mode from MTU assigned in
16779 	 * ether_setup() via the alloc_etherdev() call
16780 	 */
16781 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16782 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16783 
16784 	/* Determine WakeOnLan speed to use. */
16785 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16786 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16787 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16788 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16789 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16790 	} else {
16791 		tg3_flag_set(tp, WOL_SPEED_100MB);
16792 	}
16793 
16794 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16795 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16796 
16797 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16798 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16799 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16800 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16801 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16802 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16803 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16804 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16805 
16806 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16807 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16808 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16809 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16810 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16811 
16812 	if (tg3_flag(tp, 5705_PLUS) &&
16813 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16814 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16815 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16816 	    !tg3_flag(tp, 57765_PLUS)) {
16817 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16818 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16819 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16820 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16821 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16822 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16823 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16824 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16825 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16826 		} else
16827 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16828 	}
16829 
16830 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16831 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16832 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16833 		if (tp->phy_otp == 0)
16834 			tp->phy_otp = TG3_OTP_DEFAULT;
16835 	}
16836 
16837 	if (tg3_flag(tp, CPMU_PRESENT))
16838 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16839 	else
16840 		tp->mi_mode = MAC_MI_MODE_BASE;
16841 
16842 	tp->coalesce_mode = 0;
16843 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16844 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16845 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16846 
16847 	/* Set these bits to enable statistics workaround. */
16848 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16849 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16850 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16851 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16852 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16853 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16854 	}
16855 
16856 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16857 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16858 		tg3_flag_set(tp, USE_PHYLIB);
16859 
16860 	err = tg3_mdio_init(tp);
16861 	if (err)
16862 		return err;
16863 
16864 	/* Initialize data/descriptor byte/word swapping. */
16865 	val = tr32(GRC_MODE);
16866 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16867 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16868 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16869 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16870 			GRC_MODE_B2HRX_ENABLE |
16871 			GRC_MODE_HTX2B_ENABLE |
16872 			GRC_MODE_HOST_STACKUP);
16873 	else
16874 		val &= GRC_MODE_HOST_STACKUP;
16875 
16876 	tw32(GRC_MODE, val | tp->grc_mode);
16877 
16878 	tg3_switch_clocks(tp);
16879 
16880 	/* Clear this out for sanity. */
16881 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16882 
16883 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16884 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16885 
16886 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16887 			      &pci_state_reg);
16888 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16889 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16890 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16891 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16892 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16893 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16894 			void __iomem *sram_base;
16895 
16896 			/* Write some dummy words into the SRAM status block
16897 			 * area and see if it reads back correctly.  If the return
16898 			 * value is bad, force enable the PCIX workaround.
16899 			 */
16900 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16901 
16902 			writel(0x00000000, sram_base);
16903 			writel(0x00000000, sram_base + 4);
16904 			writel(0xffffffff, sram_base + 4);
16905 			if (readl(sram_base) != 0x00000000)
16906 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16907 		}
16908 	}
16909 
16910 	udelay(50);
16911 	tg3_nvram_init(tp);
16912 
16913 	/* If the device has an NVRAM, no need to load patch firmware */
16914 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16915 	    !tg3_flag(tp, NO_NVRAM))
16916 		tp->fw_needed = NULL;
16917 
16918 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16919 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16920 
16921 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16922 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16923 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16924 		tg3_flag_set(tp, IS_5788);
16925 
16926 	if (!tg3_flag(tp, IS_5788) &&
16927 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16928 		tg3_flag_set(tp, TAGGED_STATUS);
16929 	if (tg3_flag(tp, TAGGED_STATUS)) {
16930 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16931 				      HOSTCC_MODE_CLRTICK_TXBD);
16932 
16933 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16934 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16935 				       tp->misc_host_ctrl);
16936 	}
16937 
16938 	/* Preserve the APE MAC_MODE bits */
16939 	if (tg3_flag(tp, ENABLE_APE))
16940 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16941 	else
16942 		tp->mac_mode = 0;
16943 
16944 	if (tg3_10_100_only_device(tp, ent))
16945 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16946 
16947 	err = tg3_phy_probe(tp);
16948 	if (err) {
16949 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16950 		/* ... but do not return immediately ... */
16951 		tg3_mdio_fini(tp);
16952 	}
16953 
16954 	tg3_read_vpd(tp);
16955 	tg3_read_fw_ver(tp);
16956 
16957 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16958 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16959 	} else {
16960 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16961 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16962 		else
16963 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16964 	}
16965 
16966 	/* 5700 {AX,BX} chips have a broken status block link
16967 	 * change bit implementation, so we must use the
16968 	 * status register in those cases.
16969 	 */
16970 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16971 		tg3_flag_set(tp, USE_LINKCHG_REG);
16972 	else
16973 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16974 
16975 	/* The led_ctrl is set during tg3_phy_probe; here we might
16976 	 * have to force the link status polling mechanism based
16977 	 * upon subsystem IDs.
16978 	 */
16979 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16980 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16981 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16982 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16983 		tg3_flag_set(tp, USE_LINKCHG_REG);
16984 	}
16985 
16986 	/* For all SERDES we poll the MAC status register. */
16987 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16988 		tg3_flag_set(tp, POLL_SERDES);
16989 	else
16990 		tg3_flag_clear(tp, POLL_SERDES);
16991 
16992 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16993 		tg3_flag_set(tp, POLL_CPMU_LINK);
16994 
16995 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16996 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16997 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16998 	    tg3_flag(tp, PCIX_MODE)) {
16999 		tp->rx_offset = NET_SKB_PAD;
17000 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17001 		tp->rx_copy_thresh = ~(u16)0;
17002 #endif
17003 	}
17004 
17005 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17006 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17007 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17008 
17009 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17010 
17011 	/* Increment the rx prod index on the rx std ring by at most
17012 	 * 8 for these chips to workaround hw errata.
17013 	 * 8 for these chips to work around hw errata.
17014 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17015 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
17016 	    tg3_asic_rev(tp) == ASIC_REV_5755)
17017 		tp->rx_std_max_post = 8;
17018 
17019 	if (tg3_flag(tp, ASPM_WORKAROUND))
17020 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17021 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
17022 
17023 	return err;
17024 }
17025 
17026 static int tg3_get_device_address(struct tg3 *tp)
17027 {
17028 	struct net_device *dev = tp->dev;
17029 	u32 hi, lo, mac_offset;
17030 	int addr_ok = 0;
17031 	int err;
17032 
17033 	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
17034 		return 0;
17035 
17036 	if (tg3_flag(tp, IS_SSB_CORE)) {
17037 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17038 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17039 			return 0;
17040 	}
17041 
17042 	mac_offset = 0x7c;
17043 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17044 	    tg3_flag(tp, 5780_CLASS)) {
17045 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17046 			mac_offset = 0xcc;
17047 		if (tg3_nvram_lock(tp))
17048 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17049 		else
17050 			tg3_nvram_unlock(tp);
17051 	} else if (tg3_flag(tp, 5717_PLUS)) {
17052 		if (tp->pci_fn & 1)
17053 			mac_offset = 0xcc;
17054 		if (tp->pci_fn > 1)
17055 			mac_offset += 0x18c;
17056 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17057 		mac_offset = 0x10;
17058 
17059 	/* First try to get it from MAC address mailbox. */
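	/* A high word of 0x484b is ASCII "HK", apparently the
	 * bootcode's "valid address present" signature.
	 */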
17060 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17061 	if ((hi >> 16) == 0x484b) {
17062 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17063 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17064 
17065 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17066 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17067 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17068 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17069 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17070 
17071 		/* Some old bootcode may report a 0 MAC address in SRAM */
17072 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17073 	}
17074 	if (!addr_ok) {
17075 		/* Next, try NVRAM. */
17076 		if (!tg3_flag(tp, NO_NVRAM) &&
17077 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17078 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17079 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17080 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17081 		}
17082 		/* Finally just fetch it out of the MAC control regs. */
17083 		else {
17084 			hi = tr32(MAC_ADDR_0_HIGH);
17085 			lo = tr32(MAC_ADDR_0_LOW);
17086 
17087 			dev->dev_addr[5] = lo & 0xff;
17088 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17089 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17090 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17091 			dev->dev_addr[1] = hi & 0xff;
17092 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17093 		}
17094 	}
17095 
17096 	if (!is_valid_ether_addr(&dev->dev_addr[0]))
17097 		return -EINVAL;
17098 	return 0;
17099 }
17100 
17101 #define BOUNDARY_SINGLE_CACHELINE	1
17102 #define BOUNDARY_MULTI_CACHELINE	2
17103 
17104 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17105 {
17106 	int cacheline_size;
17107 	u8 byte;
17108 	int goal;
17109 
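	/* PCI_CACHE_LINE_SIZE is in units of 32-bit dwords, hence the
	 * multiply by four below; zero means the register was never
	 * programmed, in which case the largest (1024-byte) cacheline
	 * is assumed.
	 */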
17110 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17111 	if (byte == 0)
17112 		cacheline_size = 1024;
17113 	else
17114 		cacheline_size = (int) byte * 4;
17115 
17116 	/* On 5703 and later chips, the boundary bits have no
17117 	 * effect.
17118 	 */
17119 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17120 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17121 	    !tg3_flag(tp, PCI_EXPRESS))
17122 		goto out;
17123 
17124 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17125 	goal = BOUNDARY_MULTI_CACHELINE;
17126 #else
17127 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17128 	goal = BOUNDARY_SINGLE_CACHELINE;
17129 #else
17130 	goal = 0;
17131 #endif
17132 #endif
17133 
17134 	if (tg3_flag(tp, 57765_PLUS)) {
17135 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17136 		goto out;
17137 	}
17138 
17139 	if (!goal)
17140 		goto out;
17141 
17142 	/* PCI controllers on most RISC systems tend to disconnect
17143 	 * when a device tries to burst across a cache-line boundary.
17144 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17145 	 *
17146 	 * Unfortunately, for PCI-E there are only limited
17147 	 * write-side controls for this, and thus for reads
17148 	 * we will still get the disconnects.  We'll also waste
17149 	 * these PCI cycles for both read and write for chips
17150 	 * other than 5700 and 5701 which do not implement the
17151 	 * boundary bits.
17152 	 */
17153 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17154 		switch (cacheline_size) {
17155 		case 16:
17156 		case 32:
17157 		case 64:
17158 		case 128:
17159 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17160 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17161 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17162 			} else {
17163 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17164 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17165 			}
17166 			break;
17167 
17168 		case 256:
17169 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17170 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17171 			break;
17172 
17173 		default:
17174 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17175 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17176 			break;
17177 		}
17178 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17179 		switch (cacheline_size) {
17180 		case 16:
17181 		case 32:
17182 		case 64:
17183 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17184 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17185 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17186 				break;
17187 			}
17188 			fallthrough;
17189 		case 128:
17190 		default:
17191 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17192 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17193 			break;
17194 		}
17195 	} else {
17196 		switch (cacheline_size) {
17197 		case 16:
17198 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17199 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17200 					DMA_RWCTRL_WRITE_BNDRY_16);
17201 				break;
17202 			}
17203 			fallthrough;
17204 		case 32:
17205 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17206 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17207 					DMA_RWCTRL_WRITE_BNDRY_32);
17208 				break;
17209 			}
17210 			fallthrough;
17211 		case 64:
17212 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17213 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17214 					DMA_RWCTRL_WRITE_BNDRY_64);
17215 				break;
17216 			}
17217 			fallthrough;
17218 		case 128:
17219 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17220 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17221 					DMA_RWCTRL_WRITE_BNDRY_128);
17222 				break;
17223 			}
17224 			fallthrough;
17225 		case 256:
17226 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17227 				DMA_RWCTRL_WRITE_BNDRY_256);
17228 			break;
17229 		case 512:
17230 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17231 				DMA_RWCTRL_WRITE_BNDRY_512);
17232 			break;
17233 		case 1024:
17234 		default:
17235 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17236 				DMA_RWCTRL_WRITE_BNDRY_1024);
17237 			break;
17238 		}
17239 	}
17240 
17241 out:
17242 	return val;
17243 }
17244 
17245 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17246 			   int size, bool to_device)
17247 {
17248 	struct tg3_internal_buffer_desc test_desc;
17249 	u32 sram_dma_descs;
17250 	int i, ret;
17251 
17252 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17253 
17254 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17255 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17256 	tw32(RDMAC_STATUS, 0);
17257 	tw32(WDMAC_STATUS, 0);
17258 
17259 	tw32(BUFMGR_MODE, 0);
17260 	tw32(FTQ_RESET, 0);
17261 
17262 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17263 	test_desc.addr_lo = buf_dma & 0xffffffff;
17264 	test_desc.nic_mbuf = 0x00002100;
17265 	test_desc.len = size;
17266 
17267 	/*
17268 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17269 	 * the *second* time the tg3 driver was getting loaded after an
17270 	 * initial scan.
17271 	 *
17272 	 * Broadcom tells me:
17273 	 *   ...the DMA engine is connected to the GRC block and a DMA
17274 	 *   reset may affect the GRC block in some unpredictable way...
17275 	 *   The behavior of resets to individual blocks has not been tested.
17276 	 *
17277 	 * Broadcom noted the GRC reset will also reset all sub-components.
17278 	 */
17279 	if (to_device) {
17280 		test_desc.cqid_sqid = (13 << 8) | 2;
17281 
17282 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17283 		udelay(40);
17284 	} else {
17285 		test_desc.cqid_sqid = (16 << 8) | 7;
17286 
17287 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17288 		udelay(40);
17289 	}
17290 	test_desc.flags = 0x00000005;
17291 
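	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window: point the window
	 * base at the target SRAM address, then write the data
	 * register.
	 */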
17292 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17293 		u32 val;
17294 
17295 		val = *(((u32 *)&test_desc) + i);
17296 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17297 				       sram_dma_descs + (i * sizeof(u32)));
17298 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17299 	}
17300 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17301 
17302 	if (to_device)
17303 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17304 	else
17305 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17306 
17307 	ret = -ENODEV;
17308 	for (i = 0; i < 40; i++) {
17309 		u32 val;
17310 
17311 		if (to_device)
17312 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17313 		else
17314 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17315 		if ((val & 0xffff) == sram_dma_descs) {
17316 			ret = 0;
17317 			break;
17318 		}
17319 
17320 		udelay(100);
17321 	}
17322 
17323 	return ret;
17324 }
17325 
17326 #define TEST_BUFFER_SIZE	0x2000
17327 
17328 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17329 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17330 	{ },
17331 };
17332 
17333 static int tg3_test_dma(struct tg3 *tp)
17334 {
17335 	dma_addr_t buf_dma;
17336 	u32 *buf, saved_dma_rwctrl;
17337 	int ret = 0;
17338 
17339 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17340 				 &buf_dma, GFP_KERNEL);
17341 	if (!buf) {
17342 		ret = -ENOMEM;
17343 		goto out_nofree;
17344 	}
17345 
17346 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17347 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17348 
17349 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17350 
17351 	if (tg3_flag(tp, 57765_PLUS))
17352 		goto out;
17353 
17354 	if (tg3_flag(tp, PCI_EXPRESS)) {
17355 		/* DMA read watermark not used on PCIE */
17356 		tp->dma_rwctrl |= 0x00180000;
17357 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17358 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17359 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17360 			tp->dma_rwctrl |= 0x003f0000;
17361 		else
17362 			tp->dma_rwctrl |= 0x003f000f;
17363 	} else {
17364 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17365 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17366 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17367 			u32 read_water = 0x7;
17368 
17369 			/* If the 5704 is behind the EPB bridge, we can
17370 			 * do the less restrictive ONE_DMA workaround for
17371 			 * better performance.
17372 			 */
17373 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17374 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17375 				tp->dma_rwctrl |= 0x8000;
17376 			else if (ccval == 0x6 || ccval == 0x7)
17377 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17378 
17379 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17380 				read_water = 4;
17381 			/* Set bit 23 to enable PCIX hw bug fix */
17382 			tp->dma_rwctrl |=
17383 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17384 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17385 				(1 << 23);
17386 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17387 			/* 5780 always in PCIX mode */
17388 			tp->dma_rwctrl |= 0x00144000;
17389 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17390 			/* 5714 always in PCIX mode */
17391 			tp->dma_rwctrl |= 0x00148000;
17392 		} else {
17393 			tp->dma_rwctrl |= 0x001b000f;
17394 		}
17395 	}
17396 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17397 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17398 
17399 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17400 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17401 		tp->dma_rwctrl &= 0xfffffff0;
17402 
17403 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17404 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17405 		/* Remove this if it causes problems for some boards. */
17406 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17407 
17408 		/* On 5700/5701 chips, we need to set this bit.
17409 		 * Otherwise the chip will issue cacheline transactions
17410 		 * to streamable DMA memory with not all the byte
17411 		 * enables turned on.  This is an error on several
17412 		 * RISC PCI controllers, in particular sparc64.
17413 		 *
17414 		 * On 5703/5704 chips, this bit has been reassigned
17415 		 * a different meaning.  In particular, it is used
17416 		 * on those chips to enable a PCI-X workaround.
17417 		 */
17418 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17419 	}
17420 
17421 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17422 
17423 
17424 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17425 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17426 		goto out;
17427 
17428 	/* It is best to perform DMA test with maximum write burst size
17429 	 * to expose the 5700/5701 write DMA bug.
17430 	 */
17431 	saved_dma_rwctrl = tp->dma_rwctrl;
17432 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17433 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17434 
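	/* Fill a buffer with a known pattern, DMA it to the chip and
	 * back again, and verify the contents.  On corruption, retry
	 * with the write boundary forced down to 16 bytes before
	 * declaring the device unusable.
	 */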
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void tg3_init_bufmgr_config(struct tg3 *tp)
{
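	/* Select mbuf memory watermark defaults by chip family; standard
	 * and jumbo-frame values are tracked separately.
	 */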
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
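		/* The low CLOCK_CTRL bits encode the PCI-X clock; the
		 * values checked below map to 33/50/66/100/133 MHz.
		 */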

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

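	/* With the CLRTICK_RXBD/TXBD host-coalescing modes, the *_CLRTCKS
	 * tick defaults apply instead.
	 */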
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

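	/* These devices carry an Application Processing Engine (APE);
	 * map its register window (BAR 2) in addition to the main one.
	 */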
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
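	/* Hand each NAPI context its interrupt, RX consumer, and TX
	 * producer mailbox.  The stride arithmetic below is inferred from
	 * the hardware mailbox layout rather than documented here.
	 */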
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it
	 * down properly.  The DMA self test will enable WDMAC and we'll
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

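	/* If preparing for power-down fails, restart the hardware and
	 * reattach the interface so the device remains usable after the
	 * aborted suspend.
	 */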
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Make sure that the reset task doesn't run. */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be a second call, or maybe we don't have a netdev yet. */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We need not recover from a permanent error; only track
	 * recovery for a frozen channel.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
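	/* Re-save config space so a later restore starts from this
	 * recovered state.
	 */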

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);