• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3  *
4  * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5  * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6  * Copyright (c) a lot of people too. Please respect their work.
7  *
8  * See MAINTAINERS file for support contact information.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/firmware.h>
28 #include <linux/pci-aspm.h>
29 #include <linux/prefetch.h>
30 #include <linux/ipv6.h>
31 #include <net/ip6_checksum.h>
32 
33 #include <asm/io.h>
34 #include <asm/irq.h>
35 
36 #define RTL8169_VERSION "2.3LK-NAPI"
37 #define MODULENAME "r8169"
38 #define PFX MODULENAME ": "
39 
40 #define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
41 #define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
42 #define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
43 #define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
44 #define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
45 #define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
46 #define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
47 #define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
48 #define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
49 #define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
50 #define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
51 #define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
52 #define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
53 #define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
54 #define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
55 #define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
56 #define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
57 #define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
58 #define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
59 
60 #ifdef RTL8169_DEBUG
61 #define assert(expr) \
62 	if (!(expr)) {					\
63 		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
64 		#expr,__FILE__,__func__,__LINE__);		\
65 	}
66 #define dprintk(fmt, args...) \
67 	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
68 #else
69 #define assert(expr) do {} while (0)
70 #define dprintk(fmt, args...)	do {} while (0)
71 #endif /* RTL8169_DEBUG */
72 
73 #define R8169_MSG_DEFAULT \
74 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
75 
76 #define TX_SLOTS_AVAIL(tp) \
77 	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
78 
79 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
80 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
81 	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
82 
83 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
84    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
85 static const int multicast_filter_limit = 32;
86 
87 #define MAX_READ_REQUEST_SHIFT	12
88 #define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
89 #define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
90 
91 #define R8169_REGS_SIZE		256
92 #define R8169_NAPI_WEIGHT	64
93 #define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
94 #define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
95 #define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
96 #define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
97 
98 #define RTL8169_TX_TIMEOUT	(6*HZ)
99 #define RTL8169_PHY_TIMEOUT	(10*HZ)
100 
101 /* write/read MMIO register */
102 #define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
103 #define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
104 #define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
105 #define RTL_R8(reg)		readb (ioaddr + (reg))
106 #define RTL_R16(reg)		readw (ioaddr + (reg))
107 #define RTL_R32(reg)		readl (ioaddr + (reg))
108 
109 enum mac_version {
110 	RTL_GIGA_MAC_VER_01 = 0,
111 	RTL_GIGA_MAC_VER_02,
112 	RTL_GIGA_MAC_VER_03,
113 	RTL_GIGA_MAC_VER_04,
114 	RTL_GIGA_MAC_VER_05,
115 	RTL_GIGA_MAC_VER_06,
116 	RTL_GIGA_MAC_VER_07,
117 	RTL_GIGA_MAC_VER_08,
118 	RTL_GIGA_MAC_VER_09,
119 	RTL_GIGA_MAC_VER_10,
120 	RTL_GIGA_MAC_VER_11,
121 	RTL_GIGA_MAC_VER_12,
122 	RTL_GIGA_MAC_VER_13,
123 	RTL_GIGA_MAC_VER_14,
124 	RTL_GIGA_MAC_VER_15,
125 	RTL_GIGA_MAC_VER_16,
126 	RTL_GIGA_MAC_VER_17,
127 	RTL_GIGA_MAC_VER_18,
128 	RTL_GIGA_MAC_VER_19,
129 	RTL_GIGA_MAC_VER_20,
130 	RTL_GIGA_MAC_VER_21,
131 	RTL_GIGA_MAC_VER_22,
132 	RTL_GIGA_MAC_VER_23,
133 	RTL_GIGA_MAC_VER_24,
134 	RTL_GIGA_MAC_VER_25,
135 	RTL_GIGA_MAC_VER_26,
136 	RTL_GIGA_MAC_VER_27,
137 	RTL_GIGA_MAC_VER_28,
138 	RTL_GIGA_MAC_VER_29,
139 	RTL_GIGA_MAC_VER_30,
140 	RTL_GIGA_MAC_VER_31,
141 	RTL_GIGA_MAC_VER_32,
142 	RTL_GIGA_MAC_VER_33,
143 	RTL_GIGA_MAC_VER_34,
144 	RTL_GIGA_MAC_VER_35,
145 	RTL_GIGA_MAC_VER_36,
146 	RTL_GIGA_MAC_VER_37,
147 	RTL_GIGA_MAC_VER_38,
148 	RTL_GIGA_MAC_VER_39,
149 	RTL_GIGA_MAC_VER_40,
150 	RTL_GIGA_MAC_VER_41,
151 	RTL_GIGA_MAC_VER_42,
152 	RTL_GIGA_MAC_VER_43,
153 	RTL_GIGA_MAC_VER_44,
154 	RTL_GIGA_MAC_VER_45,
155 	RTL_GIGA_MAC_VER_46,
156 	RTL_GIGA_MAC_VER_47,
157 	RTL_GIGA_MAC_VER_48,
158 	RTL_GIGA_MAC_VER_49,
159 	RTL_GIGA_MAC_VER_50,
160 	RTL_GIGA_MAC_VER_51,
161 	RTL_GIGA_MAC_NONE   = 0xff,
162 };
163 
/*
 * Tx descriptor layout generation.  Selects which rtl_tx_desc_bit_* set and
 * which tso_csum handler apply for a given chip (see rtl_chip_infos[]).
 */
enum rtl_tx_desc_version {
	RTL_TD_0	= 0,	/* 8169, 8168b, 810x (except 8102e) */
	RTL_TD_1	= 1,	/* 8102e, 8168c and later */
};
168 
169 #define JUMBO_1K	ETH_DATA_LEN
170 #define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
171 #define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
172 #define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
173 #define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
174 
175 #define _R(NAME,TD,FW,SZ,B) {	\
176 	.name = NAME,		\
177 	.txd_version = TD,	\
178 	.fw_name = FW,		\
179 	.jumbo_max = SZ,	\
180 	.jumbo_tx_csum = B	\
181 }
182 
183 static const struct {
184 	const char *name;
185 	enum rtl_tx_desc_version txd_version;
186 	const char *fw_name;
187 	u16 jumbo_max;
188 	bool jumbo_tx_csum;
189 } rtl_chip_infos[] = {
190 	/* PCI devices. */
191 	[RTL_GIGA_MAC_VER_01] =
192 		_R("RTL8169",		RTL_TD_0, NULL, JUMBO_7K, true),
193 	[RTL_GIGA_MAC_VER_02] =
194 		_R("RTL8169s",		RTL_TD_0, NULL, JUMBO_7K, true),
195 	[RTL_GIGA_MAC_VER_03] =
196 		_R("RTL8110s",		RTL_TD_0, NULL, JUMBO_7K, true),
197 	[RTL_GIGA_MAC_VER_04] =
198 		_R("RTL8169sb/8110sb",	RTL_TD_0, NULL, JUMBO_7K, true),
199 	[RTL_GIGA_MAC_VER_05] =
200 		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
201 	[RTL_GIGA_MAC_VER_06] =
202 		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
203 	/* PCI-E devices. */
204 	[RTL_GIGA_MAC_VER_07] =
205 		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
206 	[RTL_GIGA_MAC_VER_08] =
207 		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
208 	[RTL_GIGA_MAC_VER_09] =
209 		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
210 	[RTL_GIGA_MAC_VER_10] =
211 		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
212 	[RTL_GIGA_MAC_VER_11] =
213 		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
214 	[RTL_GIGA_MAC_VER_12] =
215 		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
216 	[RTL_GIGA_MAC_VER_13] =
217 		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
218 	[RTL_GIGA_MAC_VER_14] =
219 		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
220 	[RTL_GIGA_MAC_VER_15] =
221 		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
222 	[RTL_GIGA_MAC_VER_16] =
223 		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
224 	[RTL_GIGA_MAC_VER_17] =
225 		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
226 	[RTL_GIGA_MAC_VER_18] =
227 		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
228 	[RTL_GIGA_MAC_VER_19] =
229 		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
230 	[RTL_GIGA_MAC_VER_20] =
231 		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
232 	[RTL_GIGA_MAC_VER_21] =
233 		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
234 	[RTL_GIGA_MAC_VER_22] =
235 		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
236 	[RTL_GIGA_MAC_VER_23] =
237 		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
238 	[RTL_GIGA_MAC_VER_24] =
239 		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
240 	[RTL_GIGA_MAC_VER_25] =
241 		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_1,
242 							JUMBO_9K, false),
243 	[RTL_GIGA_MAC_VER_26] =
244 		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_2,
245 							JUMBO_9K, false),
246 	[RTL_GIGA_MAC_VER_27] =
247 		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
248 	[RTL_GIGA_MAC_VER_28] =
249 		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
250 	[RTL_GIGA_MAC_VER_29] =
251 		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
252 							JUMBO_1K, true),
253 	[RTL_GIGA_MAC_VER_30] =
254 		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
255 							JUMBO_1K, true),
256 	[RTL_GIGA_MAC_VER_31] =
257 		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
258 	[RTL_GIGA_MAC_VER_32] =
259 		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_1,
260 							JUMBO_9K, false),
261 	[RTL_GIGA_MAC_VER_33] =
262 		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_2,
263 							JUMBO_9K, false),
264 	[RTL_GIGA_MAC_VER_34] =
265 		_R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
266 							JUMBO_9K, false),
267 	[RTL_GIGA_MAC_VER_35] =
268 		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_1,
269 							JUMBO_9K, false),
270 	[RTL_GIGA_MAC_VER_36] =
271 		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
272 							JUMBO_9K, false),
273 	[RTL_GIGA_MAC_VER_37] =
274 		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
275 							JUMBO_1K, true),
276 	[RTL_GIGA_MAC_VER_38] =
277 		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
278 							JUMBO_9K, false),
279 	[RTL_GIGA_MAC_VER_39] =
280 		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_1,
281 							JUMBO_1K, true),
282 	[RTL_GIGA_MAC_VER_40] =
283 		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_2,
284 							JUMBO_9K, false),
285 	[RTL_GIGA_MAC_VER_41] =
286 		_R("RTL8168g/8111g",	RTL_TD_1, NULL, JUMBO_9K, false),
287 	[RTL_GIGA_MAC_VER_42] =
288 		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_3,
289 							JUMBO_9K, false),
290 	[RTL_GIGA_MAC_VER_43] =
291 		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_2,
292 							JUMBO_1K, true),
293 	[RTL_GIGA_MAC_VER_44] =
294 		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_2,
295 							JUMBO_9K, false),
296 	[RTL_GIGA_MAC_VER_45] =
297 		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_1,
298 							JUMBO_9K, false),
299 	[RTL_GIGA_MAC_VER_46] =
300 		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_2,
301 							JUMBO_9K, false),
302 	[RTL_GIGA_MAC_VER_47] =
303 		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_1,
304 							JUMBO_1K, false),
305 	[RTL_GIGA_MAC_VER_48] =
306 		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_2,
307 							JUMBO_1K, false),
308 	[RTL_GIGA_MAC_VER_49] =
309 		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
310 							JUMBO_9K, false),
311 	[RTL_GIGA_MAC_VER_50] =
312 		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
313 							JUMBO_9K, false),
314 	[RTL_GIGA_MAC_VER_51] =
315 		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
316 							JUMBO_9K, false),
317 };
318 #undef _R
319 
/*
 * Board configuration index, carried in the driver_data field of
 * rtl8169_pci_tbl entries to select per-board setup at probe time.
 */
enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};
325 
326 static const struct pci_device_id rtl8169_pci_tbl[] = {
327 	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
328 	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
329 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
330 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
331 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
332 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
333 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
334 	{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE,	0x8168), 0, 0, RTL_CFG_1 },
335 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
336 	{ PCI_VENDOR_ID_DLINK,			0x4300,
337 		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
338 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
339 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
340 	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
341 	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
342 	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
343 		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
344 	{ 0x0001,				0x8168,
345 		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
346 	{0,},
347 };
348 
349 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
350 
351 static int rx_buf_sz = 16383;
352 static int use_dac = -1;
353 static struct {
354 	u32 msg_enable;
355 } debug = { -1 };
356 
357 enum rtl_registers {
358 	MAC0		= 0,	/* Ethernet hardware address. */
359 	MAC4		= 4,
360 	MAR0		= 8,	/* Multicast filter. */
361 	CounterAddrLow		= 0x10,
362 	CounterAddrHigh		= 0x14,
363 	TxDescStartAddrLow	= 0x20,
364 	TxDescStartAddrHigh	= 0x24,
365 	TxHDescStartAddrLow	= 0x28,
366 	TxHDescStartAddrHigh	= 0x2c,
367 	FLASH		= 0x30,
368 	ERSR		= 0x36,
369 	ChipCmd		= 0x37,
370 	TxPoll		= 0x38,
371 	IntrMask	= 0x3c,
372 	IntrStatus	= 0x3e,
373 
374 	TxConfig	= 0x40,
375 #define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
376 #define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
377 
378 	RxConfig	= 0x44,
379 #define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
380 #define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
381 #define	RXCFG_FIFO_SHIFT		13
382 					/* No threshold before first PCI xfer */
383 #define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
384 #define	RX_EARLY_OFF			(1 << 11)
385 #define	RXCFG_DMA_SHIFT			8
386 					/* Unlimited maximum PCI burst. */
387 #define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
388 
389 	RxMissed	= 0x4c,
390 	Cfg9346		= 0x50,
391 	Config0		= 0x51,
392 	Config1		= 0x52,
393 	Config2		= 0x53,
394 #define PME_SIGNAL			(1 << 5)	/* 8168c and later */
395 
396 	Config3		= 0x54,
397 	Config4		= 0x55,
398 	Config5		= 0x56,
399 	MultiIntr	= 0x5c,
400 	PHYAR		= 0x60,
401 	PHYstatus	= 0x6c,
402 	RxMaxSize	= 0xda,
403 	CPlusCmd	= 0xe0,
404 	IntrMitigate	= 0xe2,
405 	RxDescAddrLow	= 0xe4,
406 	RxDescAddrHigh	= 0xe8,
407 	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
408 
409 #define NoEarlyTx	0x3f	/* Max value : no early transmit. */
410 
411 	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
412 
413 #define TxPacketMax	(8064 >> 7)
414 #define EarlySize	0x27
415 
416 	FuncEvent	= 0xf0,
417 	FuncEventMask	= 0xf4,
418 	FuncPresetState	= 0xf8,
419 	IBCR0           = 0xf8,
420 	IBCR2           = 0xf9,
421 	IBIMR0          = 0xfa,
422 	IBISR0          = 0xfb,
423 	FuncForceEvent	= 0xfc,
424 };
425 
426 enum rtl8110_registers {
427 	TBICSR			= 0x64,
428 	TBI_ANAR		= 0x68,
429 	TBI_LPAR		= 0x6a,
430 };
431 
432 enum rtl8168_8101_registers {
433 	CSIDR			= 0x64,
434 	CSIAR			= 0x68,
435 #define	CSIAR_FLAG			0x80000000
436 #define	CSIAR_WRITE_CMD			0x80000000
437 #define	CSIAR_BYTE_ENABLE		0x0f
438 #define	CSIAR_BYTE_ENABLE_SHIFT		12
439 #define	CSIAR_ADDR_MASK			0x0fff
440 #define CSIAR_FUNC_CARD			0x00000000
441 #define CSIAR_FUNC_SDIO			0x00010000
442 #define CSIAR_FUNC_NIC			0x00020000
443 #define CSIAR_FUNC_NIC2			0x00010000
444 	PMCH			= 0x6f,
445 	EPHYAR			= 0x80,
446 #define	EPHYAR_FLAG			0x80000000
447 #define	EPHYAR_WRITE_CMD		0x80000000
448 #define	EPHYAR_REG_MASK			0x1f
449 #define	EPHYAR_REG_SHIFT		16
450 #define	EPHYAR_DATA_MASK		0xffff
451 	DLLPR			= 0xd0,
452 #define	PFM_EN				(1 << 6)
453 #define	TX_10M_PS_EN			(1 << 7)
454 	DBG_REG			= 0xd1,
455 #define	FIX_NAK_1			(1 << 4)
456 #define	FIX_NAK_2			(1 << 3)
457 	TWSI			= 0xd2,
458 	MCU			= 0xd3,
459 #define	NOW_IS_OOB			(1 << 7)
460 #define	TX_EMPTY			(1 << 5)
461 #define	RX_EMPTY			(1 << 4)
462 #define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
463 #define	EN_NDP				(1 << 3)
464 #define	EN_OOB_RESET			(1 << 2)
465 #define	LINK_LIST_RDY			(1 << 1)
466 	EFUSEAR			= 0xdc,
467 #define	EFUSEAR_FLAG			0x80000000
468 #define	EFUSEAR_WRITE_CMD		0x80000000
469 #define	EFUSEAR_READ_CMD		0x00000000
470 #define	EFUSEAR_REG_MASK		0x03ff
471 #define	EFUSEAR_REG_SHIFT		8
472 #define	EFUSEAR_DATA_MASK		0xff
473 	MISC_1			= 0xf2,
474 #define	PFM_D3COLD_EN			(1 << 6)
475 };
476 
477 enum rtl8168_registers {
478 	LED_FREQ		= 0x1a,
479 	EEE_LED			= 0x1b,
480 	ERIDR			= 0x70,
481 	ERIAR			= 0x74,
482 #define ERIAR_FLAG			0x80000000
483 #define ERIAR_WRITE_CMD			0x80000000
484 #define ERIAR_READ_CMD			0x00000000
485 #define ERIAR_ADDR_BYTE_ALIGN		4
486 #define ERIAR_TYPE_SHIFT		16
487 #define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
488 #define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
489 #define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
490 #define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
491 #define ERIAR_MASK_SHIFT		12
492 #define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
493 #define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
494 #define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
495 #define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
496 #define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
497 	EPHY_RXER_NUM		= 0x7c,
498 	OCPDR			= 0xb0,	/* OCP GPHY access */
499 #define OCPDR_WRITE_CMD			0x80000000
500 #define OCPDR_READ_CMD			0x00000000
501 #define OCPDR_REG_MASK			0x7f
502 #define OCPDR_GPHY_REG_SHIFT		16
503 #define OCPDR_DATA_MASK			0xffff
504 	OCPAR			= 0xb4,
505 #define OCPAR_FLAG			0x80000000
506 #define OCPAR_GPHY_WRITE_CMD		0x8000f060
507 #define OCPAR_GPHY_READ_CMD		0x0000f060
508 	GPHY_OCP		= 0xb8,
509 	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
510 	MISC			= 0xf0,	/* 8168e only. */
511 #define TXPLA_RST			(1 << 29)
512 #define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
513 #define PWM_EN				(1 << 22)
514 #define RXDV_GATED_EN			(1 << 19)
515 #define EARLY_TALLY_EN			(1 << 16)
516 };
517 
518 enum rtl_register_content {
519 	/* InterruptStatusBits */
520 	SYSErr		= 0x8000,
521 	PCSTimeout	= 0x4000,
522 	SWInt		= 0x0100,
523 	TxDescUnavail	= 0x0080,
524 	RxFIFOOver	= 0x0040,
525 	LinkChg		= 0x0020,
526 	RxOverflow	= 0x0010,
527 	TxErr		= 0x0008,
528 	TxOK		= 0x0004,
529 	RxErr		= 0x0002,
530 	RxOK		= 0x0001,
531 
532 	/* RxStatusDesc */
533 	RxBOVF	= (1 << 24),
534 	RxFOVF	= (1 << 23),
535 	RxRWT	= (1 << 22),
536 	RxRES	= (1 << 21),
537 	RxRUNT	= (1 << 20),
538 	RxCRC	= (1 << 19),
539 
540 	/* ChipCmdBits */
541 	StopReq		= 0x80,
542 	CmdReset	= 0x10,
543 	CmdRxEnb	= 0x08,
544 	CmdTxEnb	= 0x04,
545 	RxBufEmpty	= 0x01,
546 
547 	/* TXPoll register p.5 */
548 	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
549 	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
550 	FSWInt		= 0x01,		/* Forced software interrupt */
551 
552 	/* Cfg9346Bits */
553 	Cfg9346_Lock	= 0x00,
554 	Cfg9346_Unlock	= 0xc0,
555 
556 	/* rx_mode_bits */
557 	AcceptErr	= 0x20,
558 	AcceptRunt	= 0x10,
559 	AcceptBroadcast	= 0x08,
560 	AcceptMulticast	= 0x04,
561 	AcceptMyPhys	= 0x02,
562 	AcceptAllPhys	= 0x01,
563 #define RX_CONFIG_ACCEPT_MASK		0x3f
564 
565 	/* TxConfigBits */
566 	TxInterFrameGapShift = 24,
567 	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */
568 
569 	/* Config1 register p.24 */
570 	LEDS1		= (1 << 7),
571 	LEDS0		= (1 << 6),
572 	Speed_down	= (1 << 4),
573 	MEMMAP		= (1 << 3),
574 	IOMAP		= (1 << 2),
575 	VPD		= (1 << 1),
576 	PMEnable	= (1 << 0),	/* Power Management Enable */
577 
578 	/* Config2 register p. 25 */
579 	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
580 	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
581 	PCI_Clock_66MHz = 0x01,
582 	PCI_Clock_33MHz = 0x00,
583 
584 	/* Config3 register p.25 */
585 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
586 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
587 	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
588 	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
589 	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
590 
591 	/* Config4 register */
592 	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
593 
594 	/* Config5 register p.27 */
595 	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
596 	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
597 	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
598 	Spi_en		= (1 << 3),
599 	LanWake		= (1 << 1),	/* LanWake enable/disable */
600 	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
601 	ASPM_en		= (1 << 0),	/* ASPM enable */
602 
603 	/* TBICSR p.28 */
604 	TBIReset	= 0x80000000,
605 	TBILoopback	= 0x40000000,
606 	TBINwEnable	= 0x20000000,
607 	TBINwRestart	= 0x10000000,
608 	TBILinkOk	= 0x02000000,
609 	TBINwComplete	= 0x01000000,
610 
611 	/* CPlusCmd p.31 */
612 	EnableBist	= (1 << 15),	// 8168 8101
613 	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
614 	Normal_mode	= (1 << 13),	// unused
615 	Force_half_dup	= (1 << 12),	// 8168 8101
616 	Force_rxflow_en	= (1 << 11),	// 8168 8101
617 	Force_txflow_en	= (1 << 10),	// 8168 8101
618 	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
619 	ASF		= (1 << 8),	// 8168 8101
620 	PktCntrDisable	= (1 << 7),	// 8168 8101
621 	Mac_dbgo_sel	= 0x001c,	// 8168
622 	RxVlan		= (1 << 6),
623 	RxChkSum	= (1 << 5),
624 	PCIDAC		= (1 << 4),
625 	PCIMulRW	= (1 << 3),
626 	INTT_0		= 0x0000,	// 8168
627 	INTT_1		= 0x0001,	// 8168
628 	INTT_2		= 0x0002,	// 8168
629 	INTT_3		= 0x0003,	// 8168
630 
631 	/* rtl8169_PHYstatus */
632 	TBI_Enable	= 0x80,
633 	TxFlowCtrl	= 0x40,
634 	RxFlowCtrl	= 0x20,
635 	_1000bpsF	= 0x10,
636 	_100bps		= 0x08,
637 	_10bps		= 0x04,
638 	LinkStatus	= 0x02,
639 	FullDup		= 0x01,
640 
641 	/* _TBICSRBit */
642 	TBILinkOK	= 0x02000000,
643 
644 	/* ResetCounterCommand */
645 	CounterReset	= 0x1,
646 
647 	/* DumpCounterCommand */
648 	CounterDump	= 0x8,
649 
650 	/* magic enable v2 */
651 	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
652 };
653 
654 enum rtl_desc_bit {
655 	/* First doubleword. */
656 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
657 	RingEnd		= (1 << 30), /* End of descriptor ring */
658 	FirstFrag	= (1 << 29), /* First segment of a packet */
659 	LastFrag	= (1 << 28), /* Final segment of a packet */
660 };
661 
662 /* Generic case. */
663 enum rtl_tx_desc_bit {
664 	/* First doubleword. */
665 	TD_LSO		= (1 << 27),		/* Large Send Offload */
666 #define TD_MSS_MAX			0x07ffu	/* MSS value */
667 
668 	/* Second doubleword. */
669 	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
670 };
671 
672 /* 8169, 8168b and 810x except 8102e. */
673 enum rtl_tx_desc_bit_0 {
674 	/* First doubleword. */
675 #define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
676 	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
677 	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
678 	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
679 };
680 
681 /* 8102e, 8168c and beyond. */
682 enum rtl_tx_desc_bit_1 {
683 	/* First doubleword. */
684 	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
685 	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
686 #define GTTCPHO_SHIFT			18
687 #define GTTCPHO_MAX			0x7fU
688 
689 	/* Second doubleword. */
690 #define TCPHO_SHIFT			18
691 #define TCPHO_MAX			0x3ffU
692 #define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
693 	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
694 	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
695 	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
696 	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
697 };
698 
699 enum rtl_rx_desc_bit {
700 	/* Rx private */
701 	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
702 	PID0		= (1 << 17), /* Protocol ID bit 0/2 */
703 
704 #define RxProtoUDP	(PID1)
705 #define RxProtoTCP	(PID0)
706 #define RxProtoIP	(PID1 | PID0)
707 #define RxProtoMask	RxProtoIP
708 
709 	IPFail		= (1 << 16), /* IP checksum failed */
710 	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
711 	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
712 	RxVlanTag	= (1 << 16), /* VLAN tag available */
713 };
714 
715 #define RsvdMask	0x3fffc000
716 
/*
 * Hardware Tx descriptor.  Layout and field widths are fixed by the NIC;
 * all fields are little-endian as seen by the device.
 */
struct TxDesc {
	__le32 opts1;	/* DescOwn/RingEnd/frag flags + length/offload bits */
	__le32 opts2;	/* VLAN tag / checksum offload bits (TD_1 layout) */
	__le64 addr;	/* DMA address of the Tx buffer */
};
722 
/*
 * Hardware Rx descriptor.  Same 16-byte little-endian layout as TxDesc,
 * but opts1/opts2 carry Rx status bits (see enum rtl_rx_desc_bit).
 */
struct RxDesc {
	__le32 opts1;	/* DescOwn/RingEnd + Rx status and frame length */
	__le32 opts2;	/* received VLAN tag, if RxVlanTag is set in opts1 */
	__le64 addr;	/* DMA address of the Rx data buffer */
};
728 
/*
 * Per-Tx-slot software bookkeeping, parallel to the TxDescArray.
 * The padding keeps the struct size a multiple of the pointer size.
 */
struct ring_info {
	struct sk_buff	*skb;	/* skb mapped at this slot (first frag only) */
	u32		len;	/* mapped DMA length for this slot */
	u8		__pad[sizeof(void *) - sizeof(u32)];
};
734 
/* Driver feature flags stored in rtl8169_private::features. */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),	/* Wake-on-LAN usable */
	RTL_FEATURE_MSI		= (1 << 1),	/* MSI interrupt in use */
	RTL_FEATURE_GMII	= (1 << 2),	/* GMII (gigabit) PHY interface */
};
740 
/*
 * Hardware statistics block, DMA-dumped by the NIC via CounterAddrLow/High.
 * Field order, widths and endianness mirror the device layout exactly and
 * must not be changed.  "tx_underun" is misspelled but kept as-is.
 */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;
};
756 
/*
 * Snapshot of selected hardware counters, used to report deltas since the
 * snapshot was taken (the chip counters themselves are cumulative).
 */
struct rtl8169_tc_offsets {
	bool	inited;		/* true once a baseline snapshot was taken */
	__le64	tx_errors;
	__le32	tx_multi_collision;
	__le16	tx_aborted;
};
763 
/* Bit indices into rtl8169_private::wk.flags (deferred-work requests). */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED = 0,	/* work processing allowed at all */
	RTL_FLAG_TASK_SLOW_PENDING,	/* slow-path event handling requested */
	RTL_FLAG_TASK_RESET_PENDING,	/* full chip reset requested */
	RTL_FLAG_TASK_PHY_PENDING,	/* PHY servicing requested */
	RTL_FLAG_MAX			/* number of flag bits (bitmap size) */
};
771 
/* Per-direction packet/byte counters, protected by a u64_stats sync. */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;	/* seqcount for 64-bit reads on 32-bit */
};
777 
/*
 * Per-adapter driver state, allocated alongside the net_device.
 * Function-pointer "ops" sub-structs are filled in at probe time according
 * to tp->mac_version, so the hot paths stay chip-agnostic.
 */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	u32 msg_enable;			/* netif_msg_* verbosity mask */
	u16 txd_version;		/* RTL_TD_0 / RTL_TD_1 */
	u16 mac_version;		/* RTL_GIGA_MAC_VER_* */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
	u32 dirty_tx;			/* first not-yet-reclaimed Tx slot */
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;		/* DMA address of TxDescArray */
	dma_addr_t RxPhyAddr;		/* DMA address of RxDescArray */
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;	/* PHY watch timer */
	u16 cp_cmd;			/* cached CPlusCmd register value */

	u16 event_slow;			/* IRQ events handled in slow work */

	/* Chip-specific MII/MDIO register accessors. */
	struct mdio_ops {
		void (*write)(struct rtl8169_private *, int, int);
		int (*read)(struct rtl8169_private *, int);
	} mdio_ops;

	/* Chip-specific PLL power management. */
	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	/* Chip-specific jumbo-frame enable/disable sequences. */
	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	/* Chip-specific CSI (config space indirect) accessors. */
	struct csi_ops {
		void (*write)(struct rtl8169_private *, int, int);
		u32 (*read)(struct rtl8169_private *, int);
	} csi_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_link_ksettings)(struct net_device *,
				  struct ethtool_link_ksettings *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
	bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);

	/* Deferred work state; mutex serializes work vs. control paths. */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned features;		/* RTL_FEATURE_* bits */

	struct mii_if_info mii;
	dma_addr_t counters_phys_addr;	/* DMA address of *counters */
	struct rtl8169_counters *counters;	/* HW stats dump buffer */
	struct rtl8169_tc_offsets tc_offset;
	u32 saved_wolopts;		/* WoL options saved across suspend */
	u32 opts1_mask;			/* valid bits of RxDesc opts1 status */

	/* Parsed PHY firmware image, or RTL_FIRMWARE_UNKNOWN before load. */
	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE		32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)

	u32 ocp_base;			/* current OCP page base address */
};
863 
864 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
865 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
866 module_param(use_dac, int, 0);
867 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
868 module_param_named(debug, debug.msg_enable, int, 0);
869 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
870 MODULE_LICENSE("GPL");
871 MODULE_VERSION(RTL8169_VERSION);
872 MODULE_FIRMWARE(FIRMWARE_8168D_1);
873 MODULE_FIRMWARE(FIRMWARE_8168D_2);
874 MODULE_FIRMWARE(FIRMWARE_8168E_1);
875 MODULE_FIRMWARE(FIRMWARE_8168E_2);
876 MODULE_FIRMWARE(FIRMWARE_8168E_3);
877 MODULE_FIRMWARE(FIRMWARE_8105E_1);
878 MODULE_FIRMWARE(FIRMWARE_8168F_1);
879 MODULE_FIRMWARE(FIRMWARE_8168F_2);
880 MODULE_FIRMWARE(FIRMWARE_8402_1);
881 MODULE_FIRMWARE(FIRMWARE_8411_1);
882 MODULE_FIRMWARE(FIRMWARE_8411_2);
883 MODULE_FIRMWARE(FIRMWARE_8106E_1);
884 MODULE_FIRMWARE(FIRMWARE_8106E_2);
885 MODULE_FIRMWARE(FIRMWARE_8168G_2);
886 MODULE_FIRMWARE(FIRMWARE_8168G_3);
887 MODULE_FIRMWARE(FIRMWARE_8168H_1);
888 MODULE_FIRMWARE(FIRMWARE_8168H_2);
889 MODULE_FIRMWARE(FIRMWARE_8107E_1);
890 MODULE_FIRMWARE(FIRMWARE_8107E_2);
891 
/* Serialize against the deferred-work handler (see tp->wk.work). */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
896 
/* Counterpart of rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
901 
/*
 * Set the PCIe Max_Read_Request_Size field in the Device Control register.
 * @force: new PCI_EXP_DEVCTL_READRQ field value, already shifted into place
 *         (e.g. 0x5 << 12 for 4096 bytes — see MAX_READ_REQUEST_SHIFT).
 */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
907 
/*
 * Polled hardware condition, used with rtl_loop_wait() and declared via
 * DECLARE_RTL_COND().  msg names the condition in timeout error messages.
 */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
912 
/* udelay() wrapper with the delay-callback signature rtl_loop_wait() takes. */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
917 
/*
 * Poll condition @c until c->check(tp) == @high, waiting @d units via
 * @delay before each check, for at most @n attempts.
 *
 * Returns true as soon as the condition is met; logs a netif_err() and
 * returns false if @n attempts all fail.
 */
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
			  void (*delay)(unsigned int), unsigned int d, int n,
			  bool high)
{
	int attempts_left = n;

	while (attempts_left-- > 0) {
		delay(d);
		if (c->check(tp) == high)
			return true;
	}

	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
		  c->msg, !high, n, d);
	return false;
}
933 
/* Busy-wait (udelay) until condition @c becomes true; see rtl_loop_wait(). */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
940 
/* Busy-wait (udelay) until condition @c becomes false; see rtl_loop_wait(). */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
947 
/* Sleep-wait (msleep, may schedule) until @c becomes true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
954 
/* Sleep-wait (msleep, may schedule) until @c becomes false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
961 
962 #define DECLARE_RTL_COND(name)				\
963 static bool name ## _check(struct rtl8169_private *);	\
964 							\
965 static const struct rtl_cond name = {			\
966 	.check	= name ## _check,			\
967 	.msg	= #name					\
968 };							\
969 							\
970 static bool name ## _check(struct rtl8169_private *tp)
971 
rtl_ocp_reg_failure(struct rtl8169_private * tp,u32 reg)972 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
973 {
974 	if (reg & 0xffff0001) {
975 		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
976 		return true;
977 	}
978 	return false;
979 }
980 
/* Busy flag of the GPHY OCP interface. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
987 
/*
 * Write @data to PHY OCP register @reg and wait for the interface to go
 * idle (flag low).  Invalid register addresses are rejected silently
 * after logging via rtl_ocp_reg_failure().
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
999 
/*
 * Read PHY OCP register @reg.  Returns the 16-bit register value, or
 * 0xffff (~0 truncated to u16) on timeout or invalid address.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1012 
/* Write @data to MAC OCP register @reg (no completion wait needed). */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1022 
/* Read MAC OCP register @reg; returns 0 for an invalid address. */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1034 
/* OCP base address of the standard PHY register page. */
#define OCP_STD_PHY_BASE	0xa400
1036 
r8168g_mdio_write(struct rtl8169_private * tp,int reg,int value)1037 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1038 {
1039 	if (reg == 0x1f) {
1040 		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1041 		return;
1042 	}
1043 
1044 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1045 		reg -= 0x10;
1046 
1047 	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1048 }
1049 
r8168g_mdio_read(struct rtl8169_private * tp,int reg)1050 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1051 {
1052 	if (tp->ocp_base != OCP_STD_PHY_BASE)
1053 		reg -= 0x10;
1054 
1055 	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1056 }
1057 
mac_mcu_write(struct rtl8169_private * tp,int reg,int value)1058 static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
1059 {
1060 	if (reg == 0x1f) {
1061 		tp->ocp_base = value << 4;
1062 		return;
1063 	}
1064 
1065 	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
1066 }
1067 
/* MAC MCU read relative to the current OCP page base. */
static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
1072 
/* Busy flag (bit 31) of the PHYAR MDIO access register. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1079 
/* Classic 8169 MDIO write through the PHYAR register. */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1093 
/*
 * Classic 8169 MDIO read through the PHYAR register.  Returns the 16-bit
 * register value or ~0 on timeout.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1112 
/* Busy flag of the OCPAR access register. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
1119 
/* 8168DP-1: issue one GPHY MDIO command via OCPDR/OCPAR and wait for idle. */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1130 
/* 8168DP-1 MDIO write: a write command carrying the 16-bit value. */
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
1136 
/*
 * 8168DP-1 MDIO read: issue the read command, then fetch the result with
 * a separate GPHY read cycle.  Returns ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1150 
/* Bit in reg 0xd0 gating MDIO access on 8168DP-2 (cleared = enabled). */
#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
1152 
/* Enable MDIO access on 8168DP-2 by clearing the access gate bit. */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
1157 
/* Disable MDIO access on 8168DP-2 by setting the access gate bit. */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
1162 
/* 8168DP-2 MDIO write: gate open, classic PHYAR write, gate closed. */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1173 
/* 8168DP-2 MDIO read: gate open, classic PHYAR read, gate closed. */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1187 
/* Chip-agnostic PHY write: dispatch through the per-variant mdio ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1192 
/* Chip-agnostic PHY read: dispatch through the per-variant mdio ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1197 
/* Read-modify-write: OR @value into PHY register @reg_addr. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
1202 
/* Read-modify-write a PHY register: clear mask @m, then set bits @p. */
static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, (cur & ~m) | p);
}
1210 
/* mii ioctl adapter: write PHY register (@phy_id is ignored). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1218 
/* mii ioctl adapter: read PHY register (@phy_id is ignored). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1225 
/* Busy flag of the EPHYAR (PCIe PHY) access register. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1232 
/* Write an EPHY (PCIe PHY) register and wait for completion. */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1244 
/* Read an EPHY (PCIe PHY) register; 0xffff (~0 as u16) on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1254 
/* Busy flag of the ERI access register. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
1261 
/*
 * Write @val to ERI register @addr using byte-lane @mask.  @addr must be
 * dword aligned and @mask non-zero (BUG otherwise).
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1273 
/* Read a full dword from ERI register @addr; ~0 on timeout. */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1283 
/* Read-modify-write an ERI register: clear bits @m, then set bits @p. */
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m, int type)
{
	u32 cur = rtl_eri_read(tp, addr, type);

	rtl_eri_write(tp, addr, mask, (cur & ~m) | p, type);
}
1292 
/* 8168DP OOB OCP read via OCPAR/OCPDR; ~0 on timeout. */
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
1301 
/* 8168EP OOB OCP read is routed through ERI; @mask is unused on read. */
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	return rtl_eri_read(tp, reg, ERIAR_OOB);
}
1306 
ocp_read(struct rtl8169_private * tp,u8 mask,u16 reg)1307 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
1308 {
1309 	switch (tp->mac_version) {
1310 	case RTL_GIGA_MAC_VER_27:
1311 	case RTL_GIGA_MAC_VER_28:
1312 	case RTL_GIGA_MAC_VER_31:
1313 		return r8168dp_ocp_read(tp, mask, reg);
1314 	case RTL_GIGA_MAC_VER_49:
1315 	case RTL_GIGA_MAC_VER_50:
1316 	case RTL_GIGA_MAC_VER_51:
1317 		return r8168ep_ocp_read(tp, mask, reg);
1318 	default:
1319 		BUG();
1320 		return ~0;
1321 	}
1322 }
1323 
/* 8168DP OOB OCP write via OCPDR/OCPAR, waiting for completion. */
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
1333 
/* 8168EP OOB OCP write is routed through ERI with @mask as byte lanes. */
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
		      data, ERIAR_OOB);
}
1340 
/*
 * Dispatch an OOB OCP write to the chip-appropriate backend; mirrors
 * ocp_read().  Calling this on a non-DASH chip variant is a driver bug.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		r8168dp_ocp_write(tp, mask, reg, data);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_49 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_50 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_51) {
		r8168ep_ocp_write(tp, mask, reg, data);
	} else {
		BUG();
	}
}
1359 
/* Post command @cmd to the OOB (DASH) firmware and ring its doorbell. */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
1366 
/* Commands understood by the OOB (DASH) firmware. */
#define OOB_CMD_RESET		0x00
#define OOB_CMD_DRIVER_START	0x05
#define OOB_CMD_DRIVER_STOP	0x06
1370 
rtl8168_get_ocp_reg(struct rtl8169_private * tp)1371 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1372 {
1373 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1374 }
1375 
/* DP-style DASH firmware "driver started" acknowledgement bit. */
DECLARE_RTL_COND(rtl_ocp_read_cond)
{
	u16 reg;

	reg = rtl8168_get_ocp_reg(tp);

	return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
1384 
/* EP-style DASH firmware "driver started" acknowledgement bit. */
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
	return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
1389 
/* CMAC TX-empty status bit used when stopping the 8168EP CMAC. */
DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(IBISR0) & 0x20;
}
1396 
/*
 * Quiesce the 8168EP CMAC: stop accepting, wait for TX drain, ack the
 * status bit, then fully disable.  The ordering is mandated by hardware.
 */
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
	RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
	RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
1406 
/* Tell DP-style DASH firmware the driver is up; wait for its ack bit. */
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
1412 
/* Tell EP-style DASH firmware the driver is up; wait for its ack bit. */
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1419 
rtl8168_driver_start(struct rtl8169_private * tp)1420 static void rtl8168_driver_start(struct rtl8169_private *tp)
1421 {
1422 	switch (tp->mac_version) {
1423 	case RTL_GIGA_MAC_VER_27:
1424 	case RTL_GIGA_MAC_VER_28:
1425 	case RTL_GIGA_MAC_VER_31:
1426 		rtl8168dp_driver_start(tp);
1427 		break;
1428 	case RTL_GIGA_MAC_VER_49:
1429 	case RTL_GIGA_MAC_VER_50:
1430 	case RTL_GIGA_MAC_VER_51:
1431 		rtl8168ep_driver_start(tp);
1432 		break;
1433 	default:
1434 		BUG();
1435 		break;
1436 	}
1437 }
1438 
/* Tell DP-style DASH firmware the driver is going away; wait for ack. */
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
1444 
/* Stop CMAC first, then tell EP-style DASH firmware the driver is gone. */
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1452 
rtl8168_driver_stop(struct rtl8169_private * tp)1453 static void rtl8168_driver_stop(struct rtl8169_private *tp)
1454 {
1455 	switch (tp->mac_version) {
1456 	case RTL_GIGA_MAC_VER_27:
1457 	case RTL_GIGA_MAC_VER_28:
1458 	case RTL_GIGA_MAC_VER_31:
1459 		rtl8168dp_driver_stop(tp);
1460 		break;
1461 	case RTL_GIGA_MAC_VER_49:
1462 	case RTL_GIGA_MAC_VER_50:
1463 	case RTL_GIGA_MAC_VER_51:
1464 		rtl8168ep_driver_stop(tp);
1465 		break;
1466 	default:
1467 		BUG();
1468 		break;
1469 	}
1470 }
1471 
r8168dp_check_dash(struct rtl8169_private * tp)1472 static int r8168dp_check_dash(struct rtl8169_private *tp)
1473 {
1474 	u16 reg = rtl8168_get_ocp_reg(tp);
1475 
1476 	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
1477 }
1478 
/* 1 if DASH management firmware is present on an EP variant, else 0. */
static int r8168ep_check_dash(struct rtl8169_private *tp)
{
	return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
1483 
r8168_check_dash(struct rtl8169_private * tp)1484 static int r8168_check_dash(struct rtl8169_private *tp)
1485 {
1486 	switch (tp->mac_version) {
1487 	case RTL_GIGA_MAC_VER_27:
1488 	case RTL_GIGA_MAC_VER_28:
1489 	case RTL_GIGA_MAC_VER_31:
1490 		return r8168dp_check_dash(tp);
1491 	case RTL_GIGA_MAC_VER_49:
1492 	case RTL_GIGA_MAC_VER_50:
1493 	case RTL_GIGA_MAC_VER_51:
1494 		return r8168ep_check_dash(tp);
1495 	default:
1496 		return 0;
1497 	}
1498 }
1499 
/* One entry of a batch of EXGMAC (ERI) register writes. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* byte-lane mask for the write */
	u32 val;	/* value to write */
};
1505 
rtl_write_exgmac_batch(struct rtl8169_private * tp,const struct exgmac_reg * r,int len)1506 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1507 				   const struct exgmac_reg *r, int len)
1508 {
1509 	while (len-- > 0) {
1510 		rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1511 		r++;
1512 	}
1513 }
1514 
/* Data-ready flag of the EFUSE access register. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1521 
/* Read one byte from the 8168d efuse; 0xff (~0 as u8) on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1531 
/* Snapshot the pending interrupt status bits. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1538 
/* Acknowledge (clear) the given interrupt status bits. */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1546 
/* Mask all chip interrupts. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1554 
/* Unmask the given interrupt bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1561 
/* Interrupt events serviced by NAPI polling. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1565 
/* Unmask NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1570 
/*
 * Mask all interrupts and ack any pending ones; the trailing ChipCmd
 * read flushes the posted MMIO writes.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1579 
/* Non-zero while a TBI reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1586 
/* Non-zero while the PHY's self-clearing BMCR reset bit is still set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1591 
/* Non-zero when the TBI link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1596 
/* Non-zero when the copper (xMII) link is up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1601 
/* Kick off a TBI reset (self-clearing bit in TBICSR). */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1608 
rtl8169_xmii_reset_enable(struct rtl8169_private * tp)1609 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1610 {
1611 	unsigned int val;
1612 
1613 	val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1614 	rtl_writephy(tp, MII_BMCR, val & 0xffff);
1615 }
1616 
/*
 * Apply chip-specific EXGMAC fixups after a link change.  The values
 * programmed into ERI regs 0x1bc/0x1d0/0x1dc depend on the negotiated
 * speed (read from PHYstatus) and on the MAC variant; the exact values
 * come from Realtek and are intentionally left as magic numbers.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1673 
/*
 * Propagate the current link state to the net stack.  When @pm is set,
 * link up cancels a scheduled runtime suspend and link down schedules
 * one (5s delay).
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1693 
/* Link-state update without runtime-PM side effects. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1700 
/* All Wake-on-LAN trigger types this hardware supports. */
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1702 
/*
 * Read the active Wake-on-LAN configuration from the chip's Config
 * registers and translate it into ethtool WAKE_* flags.  Returns 0 when
 * PME is disabled entirely.  On newer chips (VER_34+) the magic-packet
 * enable bit lives in ERI reg 0xdc instead of Config3.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
			wolopts |= WAKE_MAGIC;
		break;
	default:
		if (options & MagicPacket)
			wolopts |= WAKE_MAGIC;
		break;
	}

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1753 
/*
 * ethtool .get_wol: report WoL settings, reading the hardware only when
 * the device is runtime-active, otherwise the cached value.
 */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &tp->pci_dev->dev;

	/* Keep the device from suspending while we look at it. */
	pm_runtime_get_noresume(d);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	if (pm_runtime_active(d))
		wol->wolopts = __rtl8169_get_wol(tp);
	else
		wol->wolopts = tp->saved_wolopts;

	rtl_unlock_work(tp);

	pm_runtime_put_noidle(d);
}
1773 
/*
 * Program the Wake-on-LAN configuration into the chip.  The Config
 * registers are protected by the Cfg9346 lock, which must bracket all
 * writes.  On VER_34+ chips the magic-packet enable lives in ERI reg
 * 0xdc (MagicPacket_v2) and the Config3 MagicPacket entry of cfg[] is
 * skipped (tmp = ARRAY_SIZE - 1).
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i, tmp;
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake },
		{ WAKE_MAGIC, Config3, MagicPacket }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		/* Magic packet is controlled via ERI on these chips. */
		tmp = ARRAY_SIZE(cfg) - 1;
		if (wolopts & WAKE_MAGIC)
			rtl_w0w1_eri(tp,
				     0x0dc,
				     ERIAR_MASK_0100,
				     MagicPacket_v2,
				     0x0000,
				     ERIAR_EXGMAC);
		else
			rtl_w0w1_eri(tp,
				     0x0dc,
				     ERIAR_MASK_0100,
				     0x0000,
				     MagicPacket_v2,
				     ERIAR_EXGMAC);
		break;
	default:
		tmp = ARRAY_SIZE(cfg);
		break;
	}

	for (i = 0; i < tmp; i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	/* PME enable bit moved from Config1 to Config2 on newer chips. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1857 
/*
 * ethtool .set_wol: program the hardware when runtime-active, otherwise
 * cache the request for resume.  Also syncs the device wakeup flag.
 * Always returns 0.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &tp->pci_dev->dev;

	pm_runtime_get_noresume(d);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	if (pm_runtime_active(d))
		__rtl8169_set_wol(tp, wol->wolopts);
	else
		tp->saved_wolopts = wol->wolopts;

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	pm_runtime_put_noidle(d);

	return 0;
}
1884 
/* Firmware blob name for this chip variant, or NULL if none needed. */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1889 
/* ethtool .get_drvinfo: driver, version, bus and loaded firmware info. */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw may hold an ERR_PTR while firmware load is pending. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1904 
/* ethtool .get_regs_len: size of the register dump. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1909 
/*
 * Set link parameters in TBI (fiber) mode.  Only autonegotiation or a
 * forced 1000/full setting is accepted; anything else is refused with
 * -EOPNOTSUPP.  @ignored keeps the signature parallel with the xmii
 * variant.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 reg = RTL_R32(TBICSR);

	if (autoneg == AUTONEG_ENABLE) {
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	} else if (autoneg == AUTONEG_DISABLE && speed == SPEED_1000 &&
		   duplex == DUPLEX_FULL) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else {
		netif_warn(tp, link, dev,
			   "incorrect speed setting refused in TBI mode\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
1932 
/*
 * Configure the link for copper (MII/GMII) mode.
 *
 * With autoneg enabled, @adv selects the advertised 10/100 modes (and
 * 1000 modes on gigabit capable PHYs); pause advertisement is always
 * added. Without autoneg, only 10 or 100 Mbps may be forced via BMCR.
 *
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Reg 0x1f selects the PHY register page; go back to page 0
	 * before touching the standard MII registers.
	 */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		/* Rebuild the 10/100 advertisement bits from @adv. */
		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		/* Only 10 and 100 Mbps can be forced. */
		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Undocumented vendor PHY tuning for VER_02/VER_03: one set of
	 * magic values when 100 Mbps is forced, another otherwise.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
2011 
/*
 * Apply a speed/duplex/advertising configuration through the chip
 * specific handler (TBI or XMII) and, on non-PCIe chips, re-arm the
 * PHY timer when 1000/full is being autonegotiated.
 *
 * Returns the handler's status (negative errno on failure).
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		return ret;

	if (netif_running(dev) && autoneg == AUTONEG_ENABLE &&
	    (advertising & ADVERTISED_1000baseT_Full) &&
	    !pci_is_pcie(tp->pci_dev))
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);

	return ret;
}
2030 
/*
 * ndo @fix_features: mask out offloads the chip cannot provide at the
 * current MTU — no TSO above TD_MSS_MAX, and no IP Tx checksumming on
 * jumbo frames unless the chip variant supports it.
 */
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int mtu = dev->mtu;

	if (mtu > TD_MSS_MAX)
		features &= ~NETIF_F_ALL_TSO;

	if (mtu > JUMBO_1K &&
	    !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
		features &= ~NETIF_F_IP_CSUM;

	return features;
}
2045 
/*
 * Program the hardware for the requested feature set (caller holds the
 * driver work lock): NETIF_F_RXALL toggles acceptance of errored/runt
 * frames in RxConfig; RXCSUM and CTAG_RX toggle the RxChkSum/RxVlan
 * bits of the C+ command register via the cached tp->cp_cmd.
 */
static void __rtl8169_set_features(struct net_device *dev,
				   netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 rx_config;

	rx_config = RTL_R32(RxConfig);
	if (features & NETIF_F_RXALL)
		rx_config |= (AcceptErr | AcceptRunt);
	else
		rx_config &= ~(AcceptErr | AcceptRunt);

	RTL_W32(RxConfig, rx_config);

	if (features & NETIF_F_RXCSUM)
		tp->cp_cmd |= RxChkSum;
	else
		tp->cp_cmd &= ~RxChkSum;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;

	/* Re-sync the cache with live CPlusCmd bits other than the two
	 * owned here, so unrelated bits are not clobbered by the write.
	 */
	tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);

	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);	/* dummy read-back, presumably to flush the posted write */
}
2076 
/*
 * ndo @set_features: restrict @features to the bits this driver can
 * toggle at runtime and reprogram the hardware under the work lock,
 * but only when something actually changed.
 */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	const netdev_features_t changeable =
		NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;

	features &= changeable;

	rtl_lock_work(tp);
	if (features ^ dev->features)
		__rtl8169_set_features(dev, features);
	rtl_unlock_work(tp);

	return 0;
}
2091 
2092 
rtl8169_tx_vlan_tag(struct sk_buff * skb)2093 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
2094 {
2095 	return (skb_vlan_tag_present(skb)) ?
2096 		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
2097 }
2098 
rtl8169_rx_vlan_tag(struct RxDesc * desc,struct sk_buff * skb)2099 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
2100 {
2101 	u32 opts2 = le32_to_cpu(desc->opts2);
2102 
2103 	if (opts2 & RxVlanTag)
2104 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
2105 }
2106 
rtl8169_get_link_ksettings_tbi(struct net_device * dev,struct ethtool_link_ksettings * cmd)2107 static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
2108 					  struct ethtool_link_ksettings *cmd)
2109 {
2110 	struct rtl8169_private *tp = netdev_priv(dev);
2111 	void __iomem *ioaddr = tp->mmio_addr;
2112 	u32 status;
2113 	u32 supported, advertising;
2114 
2115 	supported =
2116 		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
2117 	cmd->base.port = PORT_FIBRE;
2118 
2119 	status = RTL_R32(TBICSR);
2120 	advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
2121 	cmd->base.autoneg = !!(status & TBINwEnable);
2122 
2123 	cmd->base.speed = SPEED_1000;
2124 	cmd->base.duplex = DUPLEX_FULL; /* Always set */
2125 
2126 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2127 						supported);
2128 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2129 						advertising);
2130 
2131 	return 0;
2132 }
2133 
/* ethtool @get_link_ksettings for copper (XMII) mode: delegate to the
 * generic MII library helper.
 */
static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&tp->mii, cmd);

	return 0;
}
2143 
rtl8169_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)2144 static int rtl8169_get_link_ksettings(struct net_device *dev,
2145 				      struct ethtool_link_ksettings *cmd)
2146 {
2147 	struct rtl8169_private *tp = netdev_priv(dev);
2148 	int rc;
2149 
2150 	rtl_lock_work(tp);
2151 	rc = tp->get_link_ksettings(dev, cmd);
2152 	rtl_unlock_work(tp);
2153 
2154 	return rc;
2155 }
2156 
rtl8169_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)2157 static int rtl8169_set_link_ksettings(struct net_device *dev,
2158 				      const struct ethtool_link_ksettings *cmd)
2159 {
2160 	struct rtl8169_private *tp = netdev_priv(dev);
2161 	int rc;
2162 	u32 advertising;
2163 
2164 	if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
2165 	    cmd->link_modes.advertising))
2166 		return -EINVAL;
2167 
2168 	del_timer_sync(&tp->timer);
2169 
2170 	rtl_lock_work(tp);
2171 	rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
2172 			       cmd->base.duplex, advertising);
2173 	rtl_unlock_work(tp);
2174 
2175 	return rc;
2176 }
2177 
rtl8169_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * p)2178 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2179 			     void *p)
2180 {
2181 	struct rtl8169_private *tp = netdev_priv(dev);
2182 	u32 __iomem *data = tp->mmio_addr;
2183 	u32 *dw = p;
2184 	int i;
2185 
2186 	rtl_lock_work(tp);
2187 	for (i = 0; i < R8169_REGS_SIZE; i += 4)
2188 		memcpy_fromio(dw++, data++, 4);
2189 	rtl_unlock_work(tp);
2190 }
2191 
/* ethtool @get_msglevel: report the netif message level bitmap. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
2198 
/* ethtool @set_msglevel: store the netif message level bitmap. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
2205 
/* ethtool statistics names. The order must match the data[] layout
 * filled in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
2221 
rtl8169_get_sset_count(struct net_device * dev,int sset)2222 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
2223 {
2224 	switch (sset) {
2225 	case ETH_SS_STATS:
2226 		return ARRAY_SIZE(rtl8169_gstrings);
2227 	default:
2228 		return -EOPNOTSUPP;
2229 	}
2230 }
2231 
/* Poll condition for tally counter commands: true while a reset or
 * dump request is still pending in CounterAddrLow.
 */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
2238 
/*
 * Point the chip at the DMA tally counter buffer and issue
 * @counter_cmd (CounterReset or CounterDump), then poll until the
 * command bit clears.
 *
 * Returns true when the chip completed the command in time.
 */
static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t paddr = tp->counters_phys_addr;
	u32 cmd;

	/* Program the high half first; the read-back presumably flushes
	 * the posted write before the low half + command is issued.
	 */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	RTL_R32(CounterAddrHigh);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | counter_cmd);

	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
2254 
/*
 * Ask the chip to reset its hardware tally counters.
 *
 * Returns true on success — or trivially, when the chip cannot reset
 * them at all — and false on timeout.
 */
static bool rtl8169_reset_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	/*
	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
	 * tally counters.
	 */
	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
		return true;

	return rtl8169_do_counters(dev, CounterReset);
}
2268 
/*
 * Dump the hardware tally counters into the tp->counters DMA buffer.
 *
 * Returns true on success — or trivially, when dumping is impossible
 * because the receiver is off — and false on timeout.
 */
static bool rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return true;

	return rtl8169_do_counters(dev, CounterDump);
}
2283 
rtl8169_init_counter_offsets(struct net_device * dev)2284 static bool rtl8169_init_counter_offsets(struct net_device *dev)
2285 {
2286 	struct rtl8169_private *tp = netdev_priv(dev);
2287 	struct rtl8169_counters *counters = tp->counters;
2288 	bool ret = false;
2289 
2290 	/*
2291 	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
2292 	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
2293 	 * reset by a power cycle, while the counter values collected by the
2294 	 * driver are reset at every driver unload/load cycle.
2295 	 *
2296 	 * To make sure the HW values returned by @get_stats64 match the SW
2297 	 * values, we collect the initial values at first open(*) and use them
2298 	 * as offsets to normalize the values returned by @get_stats64.
2299 	 *
2300 	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
2301 	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
2302 	 * set at open time by rtl_hw_start.
2303 	 */
2304 
2305 	if (tp->tc_offset.inited)
2306 		return true;
2307 
2308 	/* If both, reset and update fail, propagate to caller. */
2309 	if (rtl8169_reset_counters(dev))
2310 		ret = true;
2311 
2312 	if (rtl8169_update_counters(dev))
2313 		ret = true;
2314 
2315 	tp->tc_offset.tx_errors = counters->tx_errors;
2316 	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
2317 	tp->tc_offset.tx_aborted = counters->tx_aborted;
2318 	tp->tc_offset.inited = true;
2319 
2320 	return ret;
2321 }
2322 
/*
 * ethtool @get_ethtool_stats: refresh the DMA tally buffer from the
 * hardware (only when the device is runtime active) and copy it to
 * @data. The index order must match rtl8169_gstrings[].
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters = tp->counters;

	ASSERT_RTNL();

	/* Take a runtime PM reference without resuming; a suspended
	 * device keeps its last dumped values.
	 */
	pm_runtime_get_noresume(d);

	if (pm_runtime_active(d))
		rtl8169_update_counters(dev);

	pm_runtime_put_noidle(d);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);	/* sic: struct field spelling */
}
2353 
/* ethtool @get_strings: copy out the statistics name table. */
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
}
2362 
/* ethtool @nway_reset: restart autonegotiation via the MII library. */
static int rtl8169_nway_reset(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii);
}
2369 
/* ethtool entry points for all chip variants; per-variant behaviour is
 * dispatched inside the handlers (tp->get_link_ksettings, tp->set_speed).
 */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
	.nway_reset		= rtl8169_nway_reset,
	.get_link_ksettings	= rtl8169_get_link_ksettings,
	.set_link_ksettings	= rtl8169_set_link_ksettings,
};
2387 
/*
 * Identify the chip variant from the high bits of TxConfig.
 *
 * The mac_info[] table is scanned in order and the first matching
 * (mask, val) entry wins, so more specific masks must precede broader
 * family catch-alls; the all-zero terminator matches anything. An
 * unknown chip falls back to @default_version, and a few entries are
 * afterwards refined by whether the PHY is gigabit capable.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;	/* bits of TxConfig to compare */
		u32 val;	/* expected value under the mask */
		int mac_version;
	} mac_info[] = {
		/* 8168EP family. */
		{ 0x7cf00000, 0x50200000,	RTL_GIGA_MAC_VER_51 },
		{ 0x7cf00000, 0x50100000,	RTL_GIGA_MAC_VER_50 },
		{ 0x7cf00000, 0x50000000,	RTL_GIGA_MAC_VER_49 },

		/* 8168H family. */
		{ 0x7cf00000, 0x54100000,	RTL_GIGA_MAC_VER_46 },
		{ 0x7cf00000, 0x54000000,	RTL_GIGA_MAC_VER_45 },

		/* 8168G family. */
		{ 0x7cf00000, 0x5c800000,	RTL_GIGA_MAC_VER_44 },
		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	/* First match wins; the zero-mask terminator guarantees the
	 * scan stops.
	 */
	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
		/* Fast-Ethernet-only PHYs map to the sibling version. */
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_42 :
				  RTL_GIGA_MAC_VER_43;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_45 :
				  RTL_GIGA_MAC_VER_47;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_46 :
				  RTL_GIGA_MAC_VER_48;
	}
}
2522 
/* Debug helper: log the detected mac_version. */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2527 
/* One entry of a batched PHY write sequence (see rtl_writephy_batch). */
struct phy_reg {
	u16 reg;	/* PHY register index */
	u16 val;	/* value to write */
};
2532 
rtl_writephy_batch(struct rtl8169_private * tp,const struct phy_reg * regs,int len)2533 static void rtl_writephy_batch(struct rtl8169_private *tp,
2534 			       const struct phy_reg *regs, int len)
2535 {
2536 	while (len-- > 0) {
2537 		rtl_writephy(tp, regs->reg, regs->val);
2538 		regs++;
2539 	}
2540 }
2541 
/*
 * PHY firmware opcodes, stored in bits 31-28 of each 32 bit action
 * word. Bits 27-16 carry a register index or relative jump/skip count
 * and bits 15-0 an immediate data value; see rtl_phy_write_fw() for
 * the interpreter and rtl_fw_data_ok() for the validity checks.
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_MDIO_CHG		0x40000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
2555 
/*
 * On-disk header of the structured firmware file format, identified in
 * rtl_fw_format_ok() by a zero magic word. chksum is chosen so that
 * all bytes of the file sum to zero; fw_start/fw_len locate the PHY
 * action words (little endian).
 */
struct fw_info {
	u32	magic;
	char	version[RTL_VER_SIZE];
	__le32	fw_start;
	__le32	fw_len;
	u8	chksum;
} __packed;

/* Size in bytes of one PHY action word in a firmware image. */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2565 
/*
 * Validate the container format of a loaded firmware image and fill in
 * rtl_fw->version and rtl_fw->phy_action on success.
 *
 * Two layouts are supported:
 *  - header format (first word zero): a struct fw_info header whose
 *    chksum makes all file bytes sum to zero, followed by the action
 *    words at fw_start;
 *  - raw format: the whole file is action words, and the version
 *    string is taken from the firmware file name.
 *
 * Returns true when the image is structurally valid.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* All bytes, including chksum itself, must sum to 0. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* fw_len is counted in action words, not bytes. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		/* Raw format: file must be a whole number of opcodes. */
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2616 
/*
 * Bounds check every action word of the firmware so the interpreter in
 * rtl_phy_write_fw() can never jump or skip outside the code array.
 *
 * Returns false (after logging) on the first invalid word.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no control-flow effect need no check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_MDIO_CHG:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by regno must stay in range. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May advance by 2; the target must exist. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno words past the next one. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2672 
rtl_check_firmware(struct rtl8169_private * tp,struct rtl_fw * rtl_fw)2673 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2674 {
2675 	struct net_device *dev = tp->dev;
2676 	int rc = -EINVAL;
2677 
2678 	if (!rtl_fw_format_ok(tp, rtl_fw)) {
2679 		netif_err(tp, ifup, dev, "invalid firmware\n");
2680 		goto out;
2681 	}
2682 
2683 	if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2684 		rc = 0;
2685 out:
2686 	return rc;
2687 }
2688 
/*
 * Execute the PHY action words of a validated firmware image.
 *
 * Each word encodes an opcode (bits 31-28), a register index or
 * relative jump/skip count (bits 27-16) and immediate data (bits
 * 15-0). PHY_MDIO_CHG can temporarily redirect the MDIO accessors to
 * the MAC MCU variants; the originals are always restored on exit.
 * An all-zero word terminates execution early.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	struct mdio_ops org, *ops = &tp->mdio_ops;
	u32 predata, count;
	size_t index;

	/* Save the current MDIO accessors so PHY_MDIO_CHG is reversible. */
	predata = count = 0;
	org.write = ops->write;
	org.read = ops->read;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			/* Reads feed predata and the read counter. */
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump; bounds checked by rtl_fw_data_ok. */
			index -= regno;
			break;
		case PHY_MDIO_CHG:
			/* data 0: restore original accessors, 1: use the
			 * MAC MCU ones; other values leave them unchanged.
			 */
			if (data == 0) {
				ops->write = org.write;
				ops->read = org.read;
			} else if (data == 1) {
				ops->write = mac_mcu_write;
				ops->read = mac_mcu_read;
			}

			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next word when count matches data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		default:
			/* Unreachable: rtl_fw_data_ok rejects unknown opcodes. */
			BUG();
		}
	}

	ops->write = org.write;
	ops->read = org.read;
}
2777 
rtl_release_firmware(struct rtl8169_private * tp)2778 static void rtl_release_firmware(struct rtl8169_private *tp)
2779 {
2780 	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2781 		release_firmware(tp->rtl_fw->fw);
2782 		kfree(tp->rtl_fw);
2783 	}
2784 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2785 }
2786 
/* Run the loaded PHY firmware, if any, through the interpreter. */
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		rtl_phy_write_fw(tp, rtl_fw);
}
2795 
/* Apply the PHY firmware only when PHY register @reg currently reads
 * back @val; warn otherwise that the chipset is not ready for it.
 */
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
{
	if (rtl_readphy(tp, reg) == val)
		rtl_apply_firmware(tp);
	else
		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
}
2803 
/* Vendor-provided PHY initialisation sequence for the 8169s
 * (undocumented magic register values; do not reorder).
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2870 
/* Vendor-provided PHY initialisation for the 8169sb (single magic
 * write on PHY page 2).
 */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2881 
rtl8169scd_hw_phy_config_quirk(struct rtl8169_private * tp)2882 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2883 {
2884 	struct pci_dev *pdev = tp->pci_dev;
2885 
2886 	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2887 	    (pdev->subsystem_device != 0xe000))
2888 		return;
2889 
2890 	rtl_writephy(tp, 0x1f, 0x0001);
2891 	rtl_writephy(tp, 0x10, 0xf01b);
2892 	rtl_writephy(tp, 0x1f, 0x0000);
2893 }
2894 
/* Vendor-provided PHY initialisation for the 8169scd (undocumented
 * magic values; do not reorder), plus a board specific quirk.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2941 
/*
 * PHY init for the RTL8169sce.  Shares the first part of the table
 * with the scd variant, then diverges with sce-specific values.
 * All values are undocumented vendor magic; write order matters.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2994 
/*
 * PHY init for the RTL8168bb.  Sets bit 0 of reg 0x16 on page 0x0001
 * (read-modify-write via rtl_patchphy), then writes the magic table.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	/* Page 0x0001 stays selected for the table's first write. */
	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3007 
/*
 * PHY init for RTL8168be/bf.  Single magic write to reg 0x10 on page
 * 0x0001 (same value the 8168bb table uses, minus the 0x16 patch).
 */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3018 
/*
 * PHY init for the RTL8168cp (variant 1).  Two magic register writes
 * on pages 0x0000 and 0x0002.
 */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3031 
/*
 * PHY init for the RTL8168cp (variant 2).  Sets bit 5 in regs 0x14
 * and 0x0d on page 0x0000, then writes one magic value on page 0x0001.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3046 
/*
 * PHY init for the RTL8168c (variant 1).  Magic table across pages
 * 0x0001/0x0002/0x0003, then bit-5 patches of regs 0x14 and 0x0d.
 * The table leaves page 0x0000 selected, so the trailing patches hit
 * page 0 registers; the final 0x1f write re-selects page 0 explicitly.
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3075 
/*
 * PHY init for the RTL8168c (variant 2).  Magic table, then bit
 * patches of regs 0x16/0x14/0x0d.  The table ends with page 0x0000
 * selected, so the patches apply to page 0 registers.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3103 
/*
 * PHY init for the RTL8168c (variant 3).  Shorter variant of the
 * 8168c_2 sequence (same trailing bit patches, smaller magic table).
 * Also reused verbatim for variant 4 -- see rtl8168c_4_hw_phy_config.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3125 
/* RTL8168c variant 4 uses exactly the same PHY setup as variant 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
3130 
/*
 * PHY init for the RTL8168d (variant 1).  Applies a base table of
 * vendor fixes (channel estimation, Tx error, SNR threshold), then
 * branches on an efuse-read byte -- presumably a chip/PHY revision
 * marker (TODO confirm) -- to pick between two parameter sets, and
 * finishes with PLL tuning and conditional firmware download.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/*
		 * If the low byte of reg 0x0d is not already 0x6c, step it
		 * through 0x65..0x6c one write at a time, preserving the
		 * high byte.  The stepping itself appears to matter, not
		 * just the final value -- TODO confirm against vendor docs.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Firmware is only loaded if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3241 
/*
 * PHY init for the RTL8168d (variant 2).  Near-duplicate of variant 1
 * with a different efuse-dependent parameter set (0x2642 vs 0x6662),
 * no Rx-error regulator tweak, and a slew-rate adjustment instead of
 * the RSET patches.  Firmware expects MII_EXPANSION == 0xb300 here.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Efuse byte 0x01 presumably encodes a chip revision -- TODO confirm. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		/*
		 * Step the low byte of reg 0x0d up to 0x6c one write at a
		 * time (high byte preserved) unless it is already there.
		 */
		val = rtl_readphy(tp, 0x0d);
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Firmware is only loaded if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3343 
/*
 * PHY init for the RTL8168d (variant 3).  One long vendor-supplied
 * magic table spanning pages 0x0000-0x0007; no efuse branch and no
 * firmware download for this variant.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3404 
/*
 * PHY init for the RTL8168d (variant 4).  Small magic table, then
 * sets bit 5 of reg 0x0d (the table leaves page 0x0000 selected).
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3420 
/*
 * PHY init for the RTL8168e (variant 1).  Downloads PHY firmware,
 * applies a magic fix-up table, then a series of read-modify-write
 * tweaks (rtl_w0w1_phy sets the first mask and clears the second).
 * The purpose of each group is recorded in the vendor comments below;
 * individual register semantics are otherwise undocumented.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	/* Regs 0x0d/0x0e look like an MMD address/data pair (clause 22
	 * indirect access) -- TODO confirm; sequence ends with 0x0d = 0. */
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3493 
/*
 * Program the 6-byte MAC address @addr into the GigaMAC (EXGMAC)
 * registers 0xe0/0xe4 and, byte-shifted, 0xf0/0xf4.  Used to repair
 * GigaMAC state that a broken BIOS failed to initialize.
 */
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	/* Pack the address into three little-endian 16-bit words. */
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};
	/* Note: the unnamed second initializer fills the struct member
	 * following .addr (the access mask), per C initializer rules. */
	const struct exgmac_reg e[] = {
		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
	};

	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
3510 
/*
 * PHY init for the RTL8168e (variant 2).  Firmware download, magic
 * fix-up table, then RMW tweaks for speed-down, EEE and the "green"
 * power-saving feature.  Finishes by rewriting the GigaMAC address
 * registers to work around BIOSes that leave them uninitialized.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	/* Regs 0x0d/0x0e look like an MMD address/data pair -- TODO confirm. */
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3597 
/*
 * Common PHY tweaks shared by all RTL8168f-family configs (also
 * reused by the 8411): 4-corner performance, auto speed down and
 * 10M EEE waveform adjustments.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3619 
/*
 * PHY init for the RTL8168f (variant 1).  Firmware download, a magic
 * fix-up table (channel estimation, green table, RTCT disable), the
 * common 8168f tweaks, and a 2-pair detection improvement.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3669 
/*
 * PHY init for the RTL8168f (variant 2): firmware download plus the
 * common 8168f tweaks only -- no variant-specific fix-up table.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3676 
/*
 * PHY init for the RTL8411.  Reuses the common 8168f tweaks, then a
 * fix-up table very similar to 8168f_1 (green table value 0x00aa
 * instead of 0x00fb) -- note the table is deliberately applied AFTER
 * the common tweaks here, unlike in rtl8168f_1_hw_phy_config.
 * Additional giga green-table edits, same-seed, EEE and green-feature
 * settings follow.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3773 
/*
 * PHY init for the RTL8168g (variant 1).  This generation uses wide
 * page numbers (0x0a43, 0x0bcc, ...) and an indirect scheme where reg
 * 0x13 selects an address and reg 0x14 is the data.  Two feature bits
 * are probed first and mirrored into other registers, then speed-down,
 * EEE fallback, LPF tune and SWR efficiency tweaks are applied, and
 * ALDPS is force-disabled if the firmware/strap left it on.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	/* Mirror bit 8 of page 0x0a46 reg 0x10 into page 0x0bcc reg 0x12
	 * bit 15 (inverted sense: probe set -> clear, probe clear -> set). */
	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x10) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000);
	}

	/* Mirror bit 8 of page 0x0a46 reg 0x13 into page 0x0c41 reg 0x15
	 * bit 1 (same sense this time). */
	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x13) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002);
	}

	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);

	/* EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);

	/* Improve SWR Efficiency */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x11, 0x5655);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3839 
/* RTL8168g variant 2 needs only the PHY firmware download. */
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);
}
3844 
/*
 * PHY init for the RTL8168h (variant 1).  Firmware download, channel
 * estimation adjustments for giga master/slave and fast ethernet,
 * computation of a 4-bit "tapbin" tuning value from two readback
 * registers (replicated into four indirect registers 0x827a-0x827d),
 * followed by R-tune/PGA-retune, 10M GPHY enable, SAR ADC tuning,
 * PFM-mode disable and a forced ALDPS-off check.  Uses the 8168g+
 * access scheme: reg 0x1f = page, reg 0x13 = indirect address,
 * reg 0x14 = indirect data.
 */
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
	u16 dout_tapbin;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHN EST parameters adjust - giga master */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x809b);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80a2);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80a4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
	rtl_writephy(tp, 0x13, 0x809c);
	rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - giga slave */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80ad);
	rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80b4);
	rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80ac);
	rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - fnet */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808e);
	rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
	rtl_writephy(tp, 0x13, 0x8090);
	rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
	rtl_writephy(tp, 0x13, 0x8092);
	rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	/* Build a 4-bit value from reg 0x13 bits 1:0 (as bits 3:2) and
	 * reg 0x12 bits 15:14 (as bits 1:0), XOR with 0x08, complement,
	 * and place it in the top nibble for the 0x827x writes. */
	dout_tapbin = 0;
	rtl_writephy(tp, 0x1f, 0x0a46);
	data = rtl_readphy(tp, 0x13);
	data &= 3;
	data <<= 2;
	dout_tapbin |= data;
	data = rtl_readphy(tp, 0x12);
	data &= 0xc000;
	data >>= 14;
	dout_tapbin |= data;
	dout_tapbin = ~(dout_tapbin^0x08);
	dout_tapbin <<= 12;
	dout_tapbin &= 0xf000;
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x827a);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827b);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827c);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827d);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);

	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* SAR ADC performance */
	rtl_writephy(tp, 0x1f, 0x0bca);
	rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x803f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8047);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x804f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8057);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x805f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8067);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x806f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3954 
/*
 * PHY init for the RTL8168h (variant 2).  Firmware download and the
 * usual R-tune/GPHY-10M tweaks, then two calibration computations:
 * an ADC bias "ioffset" assembled from MAC OCP registers 0xdd00/0xdd02
 * (written to page 0x0bcf reg 0x16 unless all four nibbles read 0xf),
 * and an "rlen" TX LPF corner-frequency level derived from page 0x0bcd
 * reg 0x16 and replicated into all four nibbles of reg 0x17.
 */
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
	u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
	u16 rlen;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHIN EST parameter update */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808a);
	rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Assemble four 4-bit ioffset fields from OCP 0xdd02/0xdd00:
	 * p3 = {dd02[7], dd00[15:13]}, p2 = dd00[12:9], p1 = dd00[8:5],
	 * p0 = {dd00[4], dd00[2:0]}. */
	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
	data = r8168_mac_ocp_read(tp, 0xdd02);
	ioffset_p3 = ((data & 0x80)>>7);
	ioffset_p3 <<= 3;

	data = r8168_mac_ocp_read(tp, 0xdd00);
	ioffset_p3 |= ((data & (0xe000))>>13);
	ioffset_p2 = ((data & (0x1e00))>>9);
	ioffset_p1 = ((data & (0x01e0))>>5);
	ioffset_p0 = ((data & 0x0010)>>4);
	ioffset_p0 <<= 3;
	ioffset_p0 |= (data & (0x07));
	data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);

	/* All-0xf nibbles mean "uncalibrated": skip the write then. */
	if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
	    (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
		rtl_writephy(tp, 0x1f, 0x0bcf);
		rtl_writephy(tp, 0x16, data);
		rtl_writephy(tp, 0x1f, 0x0000);
	}

	/* Modify rlen (TX LPF corner frequency) level */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	data = rtl_readphy(tp, 0x16);
	data &= 0x000f;
	rlen = 0;
	if (data > 3)
		rlen = data - 3;	/* clamp at 0 for readings <= 3 */
	data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
	rtl_writephy(tp, 0x17, data);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
4027 
/*
 * PHY tuning sequence for the RTL8168EP rev. 1: auto speed down,
 * 10M/ALDPS patching, EEE auto-fallback and UC LPF tuning, finishing
 * with an explicit ALDPS disable.  Register/page values are opaque
 * vendor tuning data; the write order is significant.
 */
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* patch 10M & ALDPS */
	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
4069 
/*
 * PHY tuning sequence for the RTL8168EP rev. 2: 10M/ALDPS patching,
 * UC LPF tuning, SDM rate select, a long table of channel-estimation
 * coefficient writes, PWM mode forcing and a final ALDPS disable.
 *
 * NOTE(review): the ~0xNNff masks passed to rtl_w0w1_phy() are wider
 * than 16 bits before truncation -- presumably intentional (only the
 * low 16 bits of the mask matter); confirm against rtl_w0w1_phy().
 */
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
	/* patch 10M & ALDPS */
	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Channel estimation parameters */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80f3);
	rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
	rtl_writephy(tp, 0x13, 0x80f0);
	rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
	rtl_writephy(tp, 0x13, 0x80ef);
	rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
	rtl_writephy(tp, 0x13, 0x80f6);
	rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
	rtl_writephy(tp, 0x13, 0x80ec);
	rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
	rtl_writephy(tp, 0x13, 0x80ed);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x80f2);
	rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
	rtl_writephy(tp, 0x13, 0x80f4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8110);
	rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
	rtl_writephy(tp, 0x13, 0x810f);
	rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
	rtl_writephy(tp, 0x13, 0x8111);
	rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
	rtl_writephy(tp, 0x13, 0x8113);
	rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
	rtl_writephy(tp, 0x13, 0x8115);
	rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
	rtl_writephy(tp, 0x13, 0x810e);
	rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
	rtl_writephy(tp, 0x13, 0x810c);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x810b);
	rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80d1);
	rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
	rtl_writephy(tp, 0x13, 0x80cd);
	rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
	rtl_writephy(tp, 0x13, 0x80d3);
	rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
	rtl_writephy(tp, 0x13, 0x80d5);
	rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
	rtl_writephy(tp, 0x13, 0x80d7);
	rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);

	/* Force PWM-mode */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x12, 0x00ed);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
4160 
/*
 * PHY setup for the RTL8102E family (mac versions 07-09): set three
 * individual bits on page 0, then apply a small vendor init table.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
4177 
/*
 * PHY setup for the RTL8105E: ALDPS must be off before the firmware
 * (ram code) is downloaded, then a vendor init table is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	/* give the PHY time to leave ALDPS before loading firmware */
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
4203 
/*
 * PHY setup for the RTL8402: disable ALDPS, load firmware, then
 * configure EEE via an ERI write plus page-4 PHY registers.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
4220 
/*
 * PHY setup for the RTL8106E: disable ALDPS, load firmware, then apply
 * a vendor init table bracketed by two ERI writes.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
4242 
rtl_hw_phy_config(struct net_device * dev)4243 static void rtl_hw_phy_config(struct net_device *dev)
4244 {
4245 	struct rtl8169_private *tp = netdev_priv(dev);
4246 
4247 	rtl8169_print_mac_version(tp);
4248 
4249 	switch (tp->mac_version) {
4250 	case RTL_GIGA_MAC_VER_01:
4251 		break;
4252 	case RTL_GIGA_MAC_VER_02:
4253 	case RTL_GIGA_MAC_VER_03:
4254 		rtl8169s_hw_phy_config(tp);
4255 		break;
4256 	case RTL_GIGA_MAC_VER_04:
4257 		rtl8169sb_hw_phy_config(tp);
4258 		break;
4259 	case RTL_GIGA_MAC_VER_05:
4260 		rtl8169scd_hw_phy_config(tp);
4261 		break;
4262 	case RTL_GIGA_MAC_VER_06:
4263 		rtl8169sce_hw_phy_config(tp);
4264 		break;
4265 	case RTL_GIGA_MAC_VER_07:
4266 	case RTL_GIGA_MAC_VER_08:
4267 	case RTL_GIGA_MAC_VER_09:
4268 		rtl8102e_hw_phy_config(tp);
4269 		break;
4270 	case RTL_GIGA_MAC_VER_11:
4271 		rtl8168bb_hw_phy_config(tp);
4272 		break;
4273 	case RTL_GIGA_MAC_VER_12:
4274 		rtl8168bef_hw_phy_config(tp);
4275 		break;
4276 	case RTL_GIGA_MAC_VER_17:
4277 		rtl8168bef_hw_phy_config(tp);
4278 		break;
4279 	case RTL_GIGA_MAC_VER_18:
4280 		rtl8168cp_1_hw_phy_config(tp);
4281 		break;
4282 	case RTL_GIGA_MAC_VER_19:
4283 		rtl8168c_1_hw_phy_config(tp);
4284 		break;
4285 	case RTL_GIGA_MAC_VER_20:
4286 		rtl8168c_2_hw_phy_config(tp);
4287 		break;
4288 	case RTL_GIGA_MAC_VER_21:
4289 		rtl8168c_3_hw_phy_config(tp);
4290 		break;
4291 	case RTL_GIGA_MAC_VER_22:
4292 		rtl8168c_4_hw_phy_config(tp);
4293 		break;
4294 	case RTL_GIGA_MAC_VER_23:
4295 	case RTL_GIGA_MAC_VER_24:
4296 		rtl8168cp_2_hw_phy_config(tp);
4297 		break;
4298 	case RTL_GIGA_MAC_VER_25:
4299 		rtl8168d_1_hw_phy_config(tp);
4300 		break;
4301 	case RTL_GIGA_MAC_VER_26:
4302 		rtl8168d_2_hw_phy_config(tp);
4303 		break;
4304 	case RTL_GIGA_MAC_VER_27:
4305 		rtl8168d_3_hw_phy_config(tp);
4306 		break;
4307 	case RTL_GIGA_MAC_VER_28:
4308 		rtl8168d_4_hw_phy_config(tp);
4309 		break;
4310 	case RTL_GIGA_MAC_VER_29:
4311 	case RTL_GIGA_MAC_VER_30:
4312 		rtl8105e_hw_phy_config(tp);
4313 		break;
4314 	case RTL_GIGA_MAC_VER_31:
4315 		/* None. */
4316 		break;
4317 	case RTL_GIGA_MAC_VER_32:
4318 	case RTL_GIGA_MAC_VER_33:
4319 		rtl8168e_1_hw_phy_config(tp);
4320 		break;
4321 	case RTL_GIGA_MAC_VER_34:
4322 		rtl8168e_2_hw_phy_config(tp);
4323 		break;
4324 	case RTL_GIGA_MAC_VER_35:
4325 		rtl8168f_1_hw_phy_config(tp);
4326 		break;
4327 	case RTL_GIGA_MAC_VER_36:
4328 		rtl8168f_2_hw_phy_config(tp);
4329 		break;
4330 
4331 	case RTL_GIGA_MAC_VER_37:
4332 		rtl8402_hw_phy_config(tp);
4333 		break;
4334 
4335 	case RTL_GIGA_MAC_VER_38:
4336 		rtl8411_hw_phy_config(tp);
4337 		break;
4338 
4339 	case RTL_GIGA_MAC_VER_39:
4340 		rtl8106e_hw_phy_config(tp);
4341 		break;
4342 
4343 	case RTL_GIGA_MAC_VER_40:
4344 		rtl8168g_1_hw_phy_config(tp);
4345 		break;
4346 	case RTL_GIGA_MAC_VER_42:
4347 	case RTL_GIGA_MAC_VER_43:
4348 	case RTL_GIGA_MAC_VER_44:
4349 		rtl8168g_2_hw_phy_config(tp);
4350 		break;
4351 	case RTL_GIGA_MAC_VER_45:
4352 	case RTL_GIGA_MAC_VER_47:
4353 		rtl8168h_1_hw_phy_config(tp);
4354 		break;
4355 	case RTL_GIGA_MAC_VER_46:
4356 	case RTL_GIGA_MAC_VER_48:
4357 		rtl8168h_2_hw_phy_config(tp);
4358 		break;
4359 
4360 	case RTL_GIGA_MAC_VER_49:
4361 		rtl8168ep_1_hw_phy_config(tp);
4362 		break;
4363 	case RTL_GIGA_MAC_VER_50:
4364 	case RTL_GIGA_MAC_VER_51:
4365 		rtl8168ep_2_hw_phy_config(tp);
4366 		break;
4367 
4368 	case RTL_GIGA_MAC_VER_41:
4369 	default:
4370 		break;
4371 	}
4372 }
4373 
rtl_phy_work(struct rtl8169_private * tp)4374 static void rtl_phy_work(struct rtl8169_private *tp)
4375 {
4376 	struct timer_list *timer = &tp->timer;
4377 	void __iomem *ioaddr = tp->mmio_addr;
4378 	unsigned long timeout = RTL8169_PHY_TIMEOUT;
4379 
4380 	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
4381 
4382 	if (tp->phy_reset_pending(tp)) {
4383 		/*
4384 		 * A busy loop could burn quite a few cycles on nowadays CPU.
4385 		 * Let's delay the execution of the timer for a few ticks.
4386 		 */
4387 		timeout = HZ/10;
4388 		goto out_mod_timer;
4389 	}
4390 
4391 	if (tp->link_ok(ioaddr))
4392 		return;
4393 
4394 	netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
4395 
4396 	tp->phy_reset_enable(tp);
4397 
4398 out_mod_timer:
4399 	mod_timer(timer, jiffies + timeout);
4400 }
4401 
rtl_schedule_task(struct rtl8169_private * tp,enum rtl_flag flag)4402 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
4403 {
4404 	if (!test_and_set_bit(flag, tp->wk.flags))
4405 		schedule_work(&tp->wk.work);
4406 }
4407 
rtl8169_phy_timer(unsigned long __opaque)4408 static void rtl8169_phy_timer(unsigned long __opaque)
4409 {
4410 	struct net_device *dev = (struct net_device *)__opaque;
4411 	struct rtl8169_private *tp = netdev_priv(dev);
4412 
4413 	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
4414 }
4415 
/*
 * Tear down the board's resources in reverse order of acquisition:
 * unmap MMIO, release PCI regions, clear MWI, disable the PCI device
 * and finally free the netdev.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
4425 
/* Polling condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
4430 
/* Trigger a PHY reset and wait (up to 100 polls, 1 ms apart) for it to
 * complete.
 */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
4437 
rtl_tbi_enabled(struct rtl8169_private * tp)4438 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
4439 {
4440 	void __iomem *ioaddr = tp->mmio_addr;
4441 
4442 	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
4443 	    (RTL_R8(PHYstatus) & TBI_Enable);
4444 }
4445 
/*
 * Full PHY bring-up: apply the chip-specific config, tweak a few
 * MAC/PCI registers on the oldest chips, reset the PHY and start
 * autonegotiation advertising everything the PHY supports.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always; 1000 only if the PHY is gigabit-capable. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
4481 
/*
 * Program the unicast MAC address into the chip's MAC0/MAC4 registers.
 * The config registers are write-protected, hence the Cfg9346
 * unlock/lock bracket; the high half (MAC4) is written before the low
 * half (MAC0), each followed by a read back -- presumably to flush the
 * posted write before continuing (TODO confirm).
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	/* VER_34 additionally mirrors the address via the EXGMAC interface */
	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
4503 
/*
 * net_device MAC-address setter.  Validates the address, stores it in
 * the netdev, and programs the hardware only while the device is
 * runtime-active (a suspended chip is reprogrammed on resume by other
 * paths -- presumably; verify against the resume handler).
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address.
 */
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &tp->pci_dev->dev;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* hold a runtime-PM reference so the active/suspended state is stable */
	pm_runtime_get_noresume(d);

	if (pm_runtime_active(d))
		rtl_rar_set(tp, dev->dev_addr);

	pm_runtime_put_noidle(d);

	return 0;
}
4524 
rtl8169_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)4525 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4526 {
4527 	struct rtl8169_private *tp = netdev_priv(dev);
4528 	struct mii_ioctl_data *data = if_mii(ifr);
4529 
4530 	return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
4531 }
4532 
rtl_xmii_ioctl(struct rtl8169_private * tp,struct mii_ioctl_data * data,int cmd)4533 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
4534 			  struct mii_ioctl_data *data, int cmd)
4535 {
4536 	switch (cmd) {
4537 	case SIOCGMIIPHY:
4538 		data->phy_id = 32; /* Internal PHY */
4539 		return 0;
4540 
4541 	case SIOCGMIIREG:
4542 		data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
4543 		return 0;
4544 
4545 	case SIOCSMIIREG:
4546 		rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
4547 		return 0;
4548 	}
4549 	return -EOPNOTSUPP;
4550 }
4551 
/* TBI-mode chips expose no MII interface: reject all MII ioctls. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
4556 
rtl_disable_msi(struct pci_dev * pdev,struct rtl8169_private * tp)4557 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
4558 {
4559 	if (tp->features & RTL_FEATURE_MSI) {
4560 		pci_disable_msi(pdev);
4561 		tp->features &= ~RTL_FEATURE_MSI;
4562 	}
4563 }
4564 
/*
 * Select the MDIO (PHY register) accessors for the detected chip:
 * the DP variants and the 8168G-and-later family each use their own
 * access mechanism; everything else uses the classic 8169 scheme.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
4600 
rtl_speed_down(struct rtl8169_private * tp)4601 static void rtl_speed_down(struct rtl8169_private *tp)
4602 {
4603 	u32 adv;
4604 	int lpa;
4605 
4606 	rtl_writephy(tp, 0x1f, 0x0000);
4607 	lpa = rtl_readphy(tp, MII_LPA);
4608 
4609 	if (lpa & (LPA_10HALF | LPA_10FULL))
4610 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
4611 	else if (lpa & (LPA_100HALF | LPA_100FULL))
4612 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4613 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4614 	else
4615 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4616 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
4617 		      (tp->mii.supports_gmii ?
4618 		       ADVERTISED_1000baseT_Half |
4619 		       ADVERTISED_1000baseT_Full : 0);
4620 
4621 	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
4622 			  adv);
4623 }
4624 
/*
 * On the listed chip versions, keep the receiver accepting broadcast,
 * multicast and our-unicast frames while suspended -- presumably so
 * Wake-on-LAN patterns can still be matched (vendor quirk; exact
 * rationale not documented here).  Other versions need nothing.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
4659 
rtl_wol_pll_power_down(struct rtl8169_private * tp)4660 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
4661 {
4662 	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
4663 		return false;
4664 
4665 	rtl_speed_down(tp);
4666 	rtl_wol_suspend_quirk(tp);
4667 
4668 	return true;
4669 }
4670 
/* Power the 810x PHY down: select page 0, then set BMCR power-down. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
4676 
/* Power the 810x PHY back up: clearing BMCR_PDOWN by writing only
 * BMCR_ANENABLE re-enables autonegotiation.
 */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
4682 
/*
 * PLL power-down for the 810x family.  Skipped entirely when WoL is
 * armed (rtl_wol_pll_power_down handles that case); the oldest
 * versions lack the PMCH control bit and only power down the PHY.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4705 
/*
 * PLL power-up for the 810x family: bring the PHY up first, then set
 * the PMCH power bit(s) on versions that have them (VER_47/48 need an
 * extra bit, 0xc0 instead of 0x80).
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
4729 
/*
 * Power the 8168 PHY up.  The 8168B..8168DP generations additionally
 * need PHY register 0x0e cleared (it is set to 0x0200 on power-down);
 * finally restart autonegotiation via BMCR.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
4756 
/*
 * Power the 8168 PHY down.  VER_32/33/40/41 keep autoneg enabled
 * alongside the power-down bit; the 8168B..8168DP generations write
 * 0x0200 to PHY register 0x0e first and then deliberately fall through
 * to the plain BMCR power-down used by everything else.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
4789 
/*
 * PLL power-down for the 8168 family.  Bails out early when:
 *  - a DASH-capable chip has DASH (remote management) active,
 *  - an ASF-capable chip (VER_23/24) has ASF enabled, or
 *  - Wake-on-LAN is armed (handled by rtl_wol_pll_power_down).
 * Otherwise powers the PHY down and clears the PMCH power bit on the
 * versions that support it (some with an extra ERI write first).
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	/* vendor-specified EPHY tweak for the 8168E before power-down */
	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
			     0xfc000000, ERIAR_EXGMAC);
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4843 
/*
 * PLL power-up for the 8168 family: restore the PMCH power bit(s) --
 * mirroring what r8168_pll_power_down cleared, including the ERI bits
 * on VER_40/41/49 -- and then power the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
			     0x00000000, ERIAR_EXGMAC);
		break;
	}

	r8168_phy_power_up(tp);
}
4876 
/* Invoke an optional chip-op callback; a NULL op is a no-op. */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4883 
/* Run the chip-specific PLL power-down op, if one is installed. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4888 
/* Run the chip-specific PLL power-up op, then let the hardware settle. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);

	/* give MAC/PHY some time to resume */
	msleep(20);
}
4896 
/*
 * Install the PLL power management callbacks for the detected chip:
 * 810x-class chips get the r810x pair, 8168-class chips the r8168
 * pair, and anything else (e.g. the original 8169s) gets none.
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4958 
/*
 * Program the base RxConfig (FIFO threshold / DMA burst / interrupt
 * mitigation) for the chip generation.  Newer families add RX_MULTI_EN
 * and, from the 8168G on, RX_EARLY_OFF on top of the common settings.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
5010 
rtl8169_init_ring_indexes(struct rtl8169_private * tp)5011 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
5012 {
5013 	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
5014 }
5015 
/*
 * Run the per-chip jumbo-frame enable hook (if any) with the config
 * registers temporarily unlocked via Cfg9346.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
5024 
/*
 * Run the per-chip jumbo-frame disable hook (if any) with the config
 * registers temporarily unlocked via Cfg9346.
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
5033 
/*
 * 8168c-family jumbo enable: set both jumbo enable bits and drop the
 * PCIe max read request size to 512 bytes.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
5042 
r8168c_hw_jumbo_disable(struct rtl8169_private * tp)5043 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
5044 {
5045 	void __iomem *ioaddr = tp->mmio_addr;
5046 
5047 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
5048 	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
5049 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5050 }
5051 
/* 8168dp jumbo enable: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
5058 
/* 8168dp jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
5065 
/*
 * 8168e jumbo enable: raise the max tx packet size, set the jumbo bit
 * plus Config4 bit 0, and drop the PCIe max read request size to 512B.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
5075 
r8168e_hw_jumbo_disable(struct rtl8169_private * tp)5076 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
5077 {
5078 	void __iomem *ioaddr = tp->mmio_addr;
5079 
5080 	RTL_W8(MaxTxPacketSize, 0x0c);
5081 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
5082 	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
5083 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5084 }
5085 
/* 8168b rev.0 jumbo enable: only the PCIe read request size changes. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
5091 
r8168b_0_hw_jumbo_disable(struct rtl8169_private * tp)5092 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
5093 {
5094 	rtl_tx_performance_tweak(tp->pci_dev,
5095 		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
5096 }
5097 
/* 8168b rev.1 jumbo enable: rev.0 tweak plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
5106 
/* 8168b rev.1 jumbo disable: rev.0 restore plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
5115 
/*
 * Select the jumbo-frame enable/disable hooks for this chip revision.
 * Chips with NULL hooks either need no action (8169) or do not support
 * jumbo frames at all (810x); rtl_hw_jumbo_{en,dis}able() tolerates that
 * via rtl_generic_op().
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
5177 
/* Poll condition: ChipCmd reset bit still set => chip reset in progress. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
5184 
/*
 * Issue a chip software reset and wait (up to 100 x 100us) for the chip
 * to clear the CmdReset bit again.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
5193 
/*
 * Load and validate the firmware patch for this chip, storing the result
 * in tp->rtl_fw.  Best-effort: on any failure a warning is logged and
 * tp->rtl_fw is set to NULL (no firmware also ends with tp->rtl_fw NULL,
 * silently).  Cleanup is the usual kernel goto-unwind chain.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
5231 
/*
 * Fetch the firmware only once: tp->rtl_fw holds an ERR_PTR until the
 * first (attempted) load, a valid pointer or NULL afterwards.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
5237 
/* Stop packet reception by clearing all accept bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
5244 
/* Poll condition: normal-priority tx queue still has pending work. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
5251 
/* Poll condition: tx FIFO reports empty via TxConfig. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
5258 
rtl8169_hw_reset(struct rtl8169_private * tp)5259 static void rtl8169_hw_reset(struct rtl8169_private *tp)
5260 {
5261 	void __iomem *ioaddr = tp->mmio_addr;
5262 
5263 	/* Disable interrupts */
5264 	rtl8169_irq_mask_and_ack(tp);
5265 
5266 	rtl_rx_close(tp);
5267 
5268 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
5269 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
5270 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
5271 		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
5272 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
5273 		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
5274 		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
5275 		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
5276 		   tp->mac_version == RTL_GIGA_MAC_VER_38 ||
5277 		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
5278 		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
5279 		   tp->mac_version == RTL_GIGA_MAC_VER_42 ||
5280 		   tp->mac_version == RTL_GIGA_MAC_VER_43 ||
5281 		   tp->mac_version == RTL_GIGA_MAC_VER_44 ||
5282 		   tp->mac_version == RTL_GIGA_MAC_VER_45 ||
5283 		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
5284 		   tp->mac_version == RTL_GIGA_MAC_VER_47 ||
5285 		   tp->mac_version == RTL_GIGA_MAC_VER_48 ||
5286 		   tp->mac_version == RTL_GIGA_MAC_VER_49 ||
5287 		   tp->mac_version == RTL_GIGA_MAC_VER_50 ||
5288 		   tp->mac_version == RTL_GIGA_MAC_VER_51) {
5289 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
5290 		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
5291 	} else {
5292 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
5293 		udelay(100);
5294 	}
5295 
5296 	rtl_hw_reset(tp);
5297 }
5298 
/* Program TxConfig with the DMA burst size and inter-frame gap time. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
5307 
/*
 * Run the chip-specific hardware start routine, then unmask interrupts.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
5316 
/*
 * Load the DMA addresses of the Tx and Rx descriptor rings into the chip.
 * The high/low write order below is deliberate — see the comment.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
5330 
/*
 * Read CPlusCmd, write the same value back (read-modify-write cycle the
 * hardware apparently wants), and return the value read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
5339 
/* Program the rx size filter just above the buffer size (see comment). */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
5345 
/*
 * Write the undocumented register 0x7c with a value that depends on the
 * chip revision and the detected PCI clock speed (Config2 bit).  Chips
 * not listed in the table are left untouched.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
	static const struct rtl_cfg2_info {
		u32 mac_version;
		u32 clk;
		u32 val;
	} cfg2_info [] = {
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, /* 8110SCd */
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, /* 8110SCe */
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	};
	u32 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++) {
		const struct rtl_cfg2_info *entry = &cfg2_info[i];

		if (entry->mac_version == mac_version && entry->clk == clk) {
			RTL_W32(0x7c, entry->val);
			break;
		}
	}
}
5370 
/*
 * Program the receive filter (unicast/broadcast/multicast accept bits and
 * the 64-bit multicast hash) from the netdev flags and mc address list.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each mc address into one of 64 filter bits (crc>>26). */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Post-VER_06 chips take the hash words swapped and byte-reversed. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* NOTE(review): VER_35 forces an all-ones hash, i.e. multicast
	 * filtering is effectively disabled there — presumably a hardware
	 * quirk; confirm against Realtek documentation.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
5423 
/*
 * Bring up an 8169-family chip: unlock the config registers, program the
 * Rx/Tx configuration, C+ command mode, descriptor ring addresses and the
 * receive filter, then re-lock the config registers.  The order of the
 * per-revision special cases below is significant.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* Oldest revisions enable Tx/Rx before the config registers are
	 * programmed; newer ones do it afterwards (see below). */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xe0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5495 
rtl_csi_write(struct rtl8169_private * tp,int addr,int value)5496 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
5497 {
5498 	if (tp->csi_ops.write)
5499 		tp->csi_ops.write(tp, addr, value);
5500 }
5501 
/* Read CSI config space @addr; ~0 on chips without CSI ops. */
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
5506 
/*
 * Read-modify-write CSI register 0x070c: keep the low 24 bits, replace
 * the top byte with @bits.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | bits);
}
5514 
/* CSI access enable, variant writing 0x17 to the top byte of 0x070c. */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
5519 
/* CSI access enable, variant writing 0x27 to the top byte of 0x070c. */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
5524 
/* Poll condition: CSIAR busy/done flag (polarity depends on the caller). */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
5531 
/*
 * Default CSI write: load CSIDR, trigger the write via CSIAR, then poll
 * until the flag drops (completion).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5542 
/*
 * Default CSI read: trigger via CSIAR, poll until the flag rises, then
 * return CSIDR; ~0 on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5553 
/* 8402 CSI write: like the default, plus the CSIAR_FUNC_NIC select bit. */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5565 
/* 8402 CSI read: like the default, plus the CSIAR_FUNC_NIC select bit. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5576 
/* 8411 CSI write: like the default, plus the CSIAR_FUNC_NIC2 select bit. */
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC2);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5588 
/* 8411 CSI read: like the default, plus the CSIAR_FUNC_NIC2 select bit. */
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5599 
/*
 * Select the CSI access functions for this chip revision.  Pre-PCIe
 * revisions get NULL ops (rtl_csi_read() then returns ~0); the 8402 and
 * 8411 need function-select bits in CSIAR, everything else uses the
 * default accessors.
 */
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write	= NULL;
		ops->read	= NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write	= r8402_csi_write;
		ops->read	= r8402_csi_read;
		break;

	case RTL_GIGA_MAC_VER_44:
		ops->write	= r8411_csi_write;
		ops->read	= r8411_csi_read;
		break;

	default:
		ops->write	= r8169_csi_write;
		ops->read	= r8169_csi_read;
		break;
	}
}
5640 
/* One EPHY register patch: at @offset, clear @mask then set @bits. */
struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};
5646 
rtl_ephy_init(struct rtl8169_private * tp,const struct ephy_info * e,int len)5647 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
5648 			  int len)
5649 {
5650 	u16 w;
5651 
5652 	while (len-- > 0) {
5653 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
5654 		rtl_ephy_write(tp, e->offset, w);
5655 		e++;
5656 	}
5657 }
5658 
/* Clear the PCIe CLKREQ enable bit in the Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
5664 
/* Set the PCIe CLKREQ enable bit in the Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
5670 
/* Set or clear the Rdy_to_L23 bit in Config3 according to @enable. */
static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 cfg3 = RTL_R8(Config3);

	RTL_W8(Config3, enable ? (cfg3 | Rdy_to_L23) : (cfg3 & ~Rdy_to_L23));
}
5685 
/*
 * CPlusCmd bits that the rtl_hw_start_8168* helpers clear to keep BIST,
 * debug and forced duplex/flow-control modes disabled.
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5696 
rtl_hw_start_8168bb(struct rtl8169_private * tp)5697 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
5698 {
5699 	void __iomem *ioaddr = tp->mmio_addr;
5700 	struct pci_dev *pdev = tp->pci_dev;
5701 
5702 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5703 
5704 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5705 
5706 	if (tp->dev->mtu <= ETH_DATA_LEN) {
5707 		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
5708 					 PCI_EXP_DEVCTL_NOSNOOP_EN);
5709 	}
5710 }
5711 
/*
 * 8168b rev.e/f init: rev.b setup plus max tx packet size and clearing
 * Config4 bit 0.
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
5722 
__rtl_hw_start_8168cp(struct rtl8169_private * tp)5723 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
5724 {
5725 	void __iomem *ioaddr = tp->mmio_addr;
5726 	struct pci_dev *pdev = tp->pci_dev;
5727 
5728 	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
5729 
5730 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5731 
5732 	if (tp->dev->mtu <= ETH_DATA_LEN)
5733 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5734 
5735 	rtl_disable_clock_request(pdev);
5736 
5737 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5738 }
5739 
/* 8168cp rev.1 init: CSI enable, EPHY patch table, common 8168cp tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
5756 
rtl_hw_start_8168cp_2(struct rtl8169_private * tp)5757 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
5758 {
5759 	void __iomem *ioaddr = tp->mmio_addr;
5760 	struct pci_dev *pdev = tp->pci_dev;
5761 
5762 	rtl_csi_access_enable_2(tp);
5763 
5764 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5765 
5766 	if (tp->dev->mtu <= ETH_DATA_LEN)
5767 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5768 
5769 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5770 }
5771 
rtl_hw_start_8168cp_3(struct rtl8169_private * tp)5772 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
5773 {
5774 	void __iomem *ioaddr = tp->mmio_addr;
5775 	struct pci_dev *pdev = tp->pci_dev;
5776 
5777 	rtl_csi_access_enable_2(tp);
5778 
5779 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5780 
5781 	/* Magic. */
5782 	RTL_W8(DBG_REG, 0x20);
5783 
5784 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5785 
5786 	if (tp->dev->mtu <= ETH_DATA_LEN)
5787 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5788 
5789 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5790 }
5791 
/* 8168c rev.1 init: CSI enable, NAK fixes via DBG_REG, EPHY patches. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
5809 
/* 8168c rev.2 init: CSI enable, EPHY patches, common 8168cp tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
5823 
/* 8168c rev.3 init is identical to rev.2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
5828 
/* 8168c rev.4 init: no EPHY patches, just CSI enable plus the common tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
5835 
rtl_hw_start_8168d(struct rtl8169_private * tp)5836 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
5837 {
5838 	void __iomem *ioaddr = tp->mmio_addr;
5839 	struct pci_dev *pdev = tp->pci_dev;
5840 
5841 	rtl_csi_access_enable_2(tp);
5842 
5843 	rtl_disable_clock_request(pdev);
5844 
5845 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5846 
5847 	if (tp->dev->mtu <= ETH_DATA_LEN)
5848 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5849 
5850 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
5851 }
5852 
rtl_hw_start_8168dp(struct rtl8169_private * tp)5853 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
5854 {
5855 	void __iomem *ioaddr = tp->mmio_addr;
5856 	struct pci_dev *pdev = tp->pci_dev;
5857 
5858 	rtl_csi_access_enable_1(tp);
5859 
5860 	if (tp->dev->mtu <= ETH_DATA_LEN)
5861 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5862 
5863 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5864 
5865 	rtl_disable_clock_request(pdev);
5866 }
5867 
rtl_hw_start_8168d_4(struct rtl8169_private * tp)5868 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
5869 {
5870 	void __iomem *ioaddr = tp->mmio_addr;
5871 	struct pci_dev *pdev = tp->pci_dev;
5872 	static const struct ephy_info e_info_8168d_4[] = {
5873 		{ 0x0b, 0x0000,	0x0048 },
5874 		{ 0x19, 0x0020,	0x0050 },
5875 		{ 0x0c, 0x0100,	0x0020 }
5876 	};
5877 
5878 	rtl_csi_access_enable_1(tp);
5879 
5880 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5881 
5882 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5883 
5884 	rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));
5885 
5886 	rtl_enable_clock_request(pdev);
5887 }
5888 
rtl_hw_start_8168e_1(struct rtl8169_private * tp)5889 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
5890 {
5891 	void __iomem *ioaddr = tp->mmio_addr;
5892 	struct pci_dev *pdev = tp->pci_dev;
5893 	static const struct ephy_info e_info_8168e_1[] = {
5894 		{ 0x00, 0x0200,	0x0100 },
5895 		{ 0x00, 0x0000,	0x0004 },
5896 		{ 0x06, 0x0002,	0x0001 },
5897 		{ 0x06, 0x0000,	0x0030 },
5898 		{ 0x07, 0x0000,	0x2000 },
5899 		{ 0x00, 0x0000,	0x0020 },
5900 		{ 0x03, 0x5800,	0x2000 },
5901 		{ 0x03, 0x0000,	0x0001 },
5902 		{ 0x01, 0x0800,	0x1000 },
5903 		{ 0x07, 0x0000,	0x4000 },
5904 		{ 0x1e, 0x0000,	0x2000 },
5905 		{ 0x19, 0xffff,	0xfe6c },
5906 		{ 0x0a, 0x0000,	0x0040 }
5907 	};
5908 
5909 	rtl_csi_access_enable_2(tp);
5910 
5911 	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5912 
5913 	if (tp->dev->mtu <= ETH_DATA_LEN)
5914 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5915 
5916 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5917 
5918 	rtl_disable_clock_request(pdev);
5919 
5920 	/* Reset tx FIFO pointer */
5921 	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5922 	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5923 
5924 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
5925 }
5926 
/*
 * Chip-specific bring-up for the RTL8168E-VL (second 8168E revision):
 * EPHY fixups, a block of undocumented ERI/EXGMAC writes taken from the
 * vendor driver, early tx size, auto-FIFO mode and EEE LED tuning.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	/* Raise the PCIe max read request size only for standard MTU. */
	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented EXGMAC register setup from the vendor driver. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	/* Leave out-of-band (OOB) management mode. */
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5966 
/*
 * Common bring-up for the RTL8168F family; called by the 8168f_1 and
 * 8411 variants before their per-chip EPHY fixups are applied.
 */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented EXGMAC register setup from the vendor driver.
	 * Note the 0xdc bit0 clear-then-set pair — presumably a toggle
	 * sequence the hardware requires; keep the order. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5997 
/*
 * RTL8168F-specific bring-up: common 8168F init plus this chip's EPHY
 * fixups, one extra ERI tweak and the EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
6017 
/*
 * RTL8411 bring-up: shares the common 8168F init, then disables the
 * PCIe L2/L3 state machinery and applies its own EPHY fixups.
 */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x1e, 0x0000,	0x4000 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);
	rtl_pcie_state_l2l3_enable(tp, false);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
6034 
/*
 * Common bring-up for the RTL8168G family (also used by 8411-2):
 * auto-FIFO tx, ERI/EXGMAC setup, rx gating release, early tx size,
 * EEE LED tuning and PCIe L2/L3 disable.
 */
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	/* Undocumented EXGMAC register setup from the vendor driver. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* 0xdc bit0 clear-then-set toggle — keep the order. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);

	/* Stop gating RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6069 
/*
 * RTL8168G (rev. 1) bring-up: common 8168G init, then EPHY fixups with
 * ASPM and clock-request disabled first (the EPHY cannot be accessed
 * reliably while they are active — see the comment below).
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168g_1[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x37d0,	0x0820 },
		{ 0x1e, 0x0000,	0x0001 },
		{ 0x19, 0x8000,	0x0000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
}
6087 
/*
 * RTL8168G (rev. 2) bring-up: common 8168G init, then this revision's
 * EPHY fixups with ASPM/clock-request disabled first.  Also reused for
 * RTL_GIGA_MAC_VER_43 by rtl_hw_start_8101().
 */
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168g_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20eb }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
6105 
/*
 * RTL8411B bring-up: shares the 8168G common init, then applies its own
 * EPHY fixups with ASPM/clock-request disabled first.
 */
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8411_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x19, 0x0020,	0x0000 },
		{ 0x1e, 0x0000,	0x2000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
6124 
/*
 * RTL8168H bring-up: EPHY fixups (with ASPM/clock-request off), ERI and
 * MAC-OCP register setup, and a PHY-derived calibration of the 1 ms
 * software counter.  Also reused for VER_47/VER_48 by
 * rtl_hw_start_8101().  The write sequence follows the vendor driver;
 * do not reorder.
 */
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int rg_saw_cnt;
	u32 data;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168h_1[] = {
		{ 0x1e, 0x0800,	0x0001 },
		{ 0x1d, 0x0000,	0x0800 },
		{ 0x05, 0xffff,	0x2089 },
		{ 0x06, 0xffff,	0x5881 },
		{ 0x04, 0xffff,	0x154a },
		{ 0x01, 0xffff,	0x068b }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	/* Undocumented EXGMAC register setup from the vendor driver. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* 0xdc bit0 clear-then-set toggle — keep the order. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

	/* Stop gating RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);

	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);

	/* Read a counter from PHY page 0x0c42 reg 0x13 and, when non-zero,
	 * use it to derive the 1 ms software-counter preload programmed
	 * into MAC-OCP reg 0xd412 (low 12 bits). */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
	rtl_writephy(tp, 0x1f, 0x0000);
	if (rg_saw_cnt > 0) {
		u16 sw_cnt_1ms_ini;

		sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
		sw_cnt_1ms_ini &= 0x0fff;
		data = r8168_mac_ocp_read(tp, 0xd412);
		data &= ~0x0fff;
		data |= sw_cnt_1ms_ini;
		r8168_mac_ocp_write(tp, 0xd412, data);
	}

	/* Remaining MAC-OCP read-modify-writes use undocumented registers;
	 * values are taken from the vendor driver. */
	data = r8168_mac_ocp_read(tp, 0xe056);
	data &= ~0xf0;
	data |= 0x70;
	r8168_mac_ocp_write(tp, 0xe056, data);

	data = r8168_mac_ocp_read(tp, 0xe052);
	data &= ~0x6000;
	data |= 0x8008;
	r8168_mac_ocp_write(tp, 0xe052, data);

	data = r8168_mac_ocp_read(tp, 0xe0d6);
	data &= ~0x01ff;
	data |= 0x017f;
	r8168_mac_ocp_write(tp, 0xe0d6, data);

	data = r8168_mac_ocp_read(tp, 0xd420);
	data &= ~0x0fff;
	data |= 0x047f;
	r8168_mac_ocp_write(tp, 0xd420, data);

	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
}
6222 
/*
 * Common bring-up for the RTL8168EP family: stops the companion CMAC,
 * then performs the usual auto-FIFO/ERI/CSI setup before the per-chip
 * _1/_2/_3 variants finish configuration.
 */
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl8168ep_stop_cmac(tp);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	/* Undocumented EXGMAC register setup from the vendor driver. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* 0xdc bit0 clear-then-set toggle — keep the order. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

	/* Stop gating RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6263 
/*
 * RTL8168EP (rev. 1) bring-up: this revision's EPHY fixups (applied
 * with ASPM/clock-request off), then the common 8168EP init.
 */
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168ep_1[] = {
		{ 0x00, 0xffff,	0x10ab },
		{ 0x06, 0xffff,	0xf030 },
		{ 0x08, 0xffff,	0x2006 },
		{ 0x0d, 0xffff,	0x1666 },
		{ 0x0c, 0x3ff0,	0x0000 }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));

	rtl_hw_start_8168ep(tp);
}
6282 
/*
 * RTL8168EP (rev. 2) bring-up: EPHY fixups (with ASPM/clock-request
 * off), common 8168EP init, then PFM-related power bits cleared.
 */
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168ep_2[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20ea }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));

	rtl_hw_start_8168ep(tp);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
}
6302 
/*
 * RTL8168EP (rev. 3) bring-up: EPHY fixups (with ASPM/clock-request
 * off), common 8168EP init, PFM bits cleared and a handful of MAC-OCP
 * read-modify-writes whose values come from the vendor driver.
 */
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8168ep_3[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0x7c00 },
		{ 0x1e, 0xffff,	0x20eb },
		{ 0x0d, 0xffff,	0x1666 }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));

	rtl_hw_start_8168ep(tp);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);

	/* Undocumented MAC-OCP tweaks (registers 0xd3e2/0xd3e4/0xe860). */
	data = r8168_mac_ocp_read(tp, 0xd3e2);
	data &= 0xf000;
	data |= 0x0271;
	r8168_mac_ocp_write(tp, 0xd3e2, data);

	data = r8168_mac_ocp_read(tp, 0xd3e4);
	data &= 0xff00;
	r8168_mac_ocp_write(tp, 0xd3e4, data);

	data = r8168_mac_ocp_read(tp, 0xe860);
	data |= 0x0080;
	r8168_mac_ocp_write(tp, 0xe860, data);
}
6337 
/*
 * Top-level hardware start for the 8168 family: performs the common
 * register setup (rx size, CPlusCmd, interrupt mitigation, descriptor
 * base addresses), dispatches to the chip-specific init routine based
 * on tp->mac_version, then enables tx/rx.  The config registers are
 * writable only between the Cfg9346 unlock/lock pair.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the preceding posted MMIO writes. */
	RTL_R8(IntrMask);

	/* Chip-specific initialization, keyed by detected mac_version. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;
	case RTL_GIGA_MAC_VER_42:
		rtl_hw_start_8168g_2(tp);
		break;

	case RTL_GIGA_MAC_VER_44:
		rtl_hw_start_8411_2(tp);
		break;

	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
		rtl_hw_start_8168h_1(tp);
		break;

	case RTL_GIGA_MAC_VER_49:
		rtl_hw_start_8168ep_1(tp);
		break;

	case RTL_GIGA_MAC_VER_50:
		rtl_hw_start_8168ep_2(tp);
		break;

	case RTL_GIGA_MAC_VER_51:
		rtl_hw_start_8168ep_3(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
6479 
6480 #define R810X_CPCMD_QUIRK_MASK (\
6481 	EnableBist | \
6482 	Mac_dbgo_oe | \
6483 	Force_half_dup | \
6484 	Force_rxflow_en | \
6485 	Force_txflow_en | \
6486 	Cxpl_dbg_sel | \
6487 	ASF | \
6488 	PktCntrDisable | \
6489 	Mac_dbgo_sel)
6490 
/*
 * RTL8102E (rev. 1) bring-up: NAK fix via DBG_REG, Config1/Config3
 * setup (keeping only one LED bit if both are set), then the EPHY
 * fixup table.
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver.  Note reg 0x01 is
	 * written three times; keep the sequence. */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck, drop LEDS0. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
6523 
/*
 * RTL8102E (rev. 2) bring-up: CSI access, PCIe read-request tweak and
 * minimal Config1/Config3 setup.  Also used as the base for rev. 3.
 */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
6536 
/*
 * RTL8102E (rev. 3) bring-up: identical to rev. 2 plus one extra EPHY
 * write (reg 0x03 — same value rev. 1 programs via its fixup table).
 */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
6543 
/*
 * RTL8105E (rev. 1) bring-up: ASPM exit forcing, early tally counter
 * disable, MCU/DLLPR power bits, EPHY fixups and PCIe L2/L3 disable.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));

	rtl_pcie_state_l2l3_enable(tp, false);
}
6571 
/*
 * RTL8105E (rev. 2) bring-up: rev. 1 init plus setting bit 15 of EPHY
 * reg 0x1e (read-modify-write).
 */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
6577 
/*
 * RTL8402 bring-up: ASPM exit forcing, auto-FIFO tx, EPHY fixups, PCIe
 * read-request tweak, ERI/EXGMAC setup and PCIe L2/L3 disable.
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups — presumably { reg, clear-mask, set-mask };
	 * magic values from the vendor driver. */
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Undocumented EXGMAC register setup from the vendor driver;
	 * note the 0xdc bit0 clear-then-set toggle — keep the order. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6608 
/*
 * RTL8106E bring-up: ASPM exit forcing, MISC/MCU/DLLPR bit twiddling
 * and PCIe L2/L3 disable.  No EPHY or ERI setup needed for this chip.
 */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6622 
/*
 * Top-level hardware start for the 8101/810x family: quirk handling,
 * common register setup, chip-specific dispatch on tp->mac_version,
 * then tx/rx enable.  Config registers are writable only between the
 * Cfg9346 unlock/lock pair.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer 810x chips don't signal rx FIFO overflow this way. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	/* Chip-specific initialization; some 810x versions reuse the
	 * 8168 G/H routines. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	case RTL_GIGA_MAC_VER_43:
		rtl_hw_start_8168g_2(tp);
		break;
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		rtl_hw_start_8168h_1(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Interrupt mitigation off for this family. */
	RTL_W16(IntrMitigate, 0x0000);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	/* Read flushes the preceding posted MMIO writes. */
	RTL_R8(IntrMask);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
6698 
rtl8169_change_mtu(struct net_device * dev,int new_mtu)6699 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
6700 {
6701 	struct rtl8169_private *tp = netdev_priv(dev);
6702 
6703 	if (new_mtu > ETH_DATA_LEN)
6704 		rtl_hw_jumbo_enable(tp);
6705 	else
6706 		rtl_hw_jumbo_disable(tp);
6707 
6708 	dev->mtu = new_mtu;
6709 	netdev_update_features(dev);
6710 
6711 	return 0;
6712 }
6713 
/*
 * Poison an rx descriptor so the NIC will not use it: set a recognisable
 * bogus DMA address and clear the ownership/size bits in opts1.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
6719 
/*
 * Release one rx buffer: unmap its DMA mapping, free the kmalloc'ed
 * buffer, clear the Rx_databuff slot and poison the descriptor so the
 * NIC cannot reuse it.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
6730 
/*
 * Hand an rx descriptor (back) to the NIC: preserve the RingEnd bit,
 * then set DescOwn together with the buffer size.  The dma_wmb() makes
 * the buffer contents visible to the device before ownership transfers.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
6740 
/*
 * Install a DMA mapping in an rx descriptor and hand it to the NIC.
 * The address must be written before ownership is transferred by
 * rtl8169_mark_to_asic() — keep this order.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
6747 
/*
 * Round a buffer pointer up to the next 16-byte boundary (rx buffers
 * are allocated with 15 bytes of slack so this stays in bounds).
 */
static inline void *rtl8169_align(void *data)
{
	long addr = (long)data;

	return (void *)((addr + 15) & ~15L);
}
6752 
rtl8169_alloc_rx_data(struct rtl8169_private * tp,struct RxDesc * desc)6753 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
6754 					     struct RxDesc *desc)
6755 {
6756 	void *data;
6757 	dma_addr_t mapping;
6758 	struct device *d = &tp->pci_dev->dev;
6759 	struct net_device *dev = tp->dev;
6760 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
6761 
6762 	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
6763 	if (!data)
6764 		return NULL;
6765 
6766 	if (rtl8169_align(data) != data) {
6767 		kfree(data);
6768 		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
6769 		if (!data)
6770 			return NULL;
6771 	}
6772 
6773 	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
6774 				 DMA_FROM_DEVICE);
6775 	if (unlikely(dma_mapping_error(d, mapping))) {
6776 		if (net_ratelimit())
6777 			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
6778 		goto err_out;
6779 	}
6780 
6781 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
6782 	return data;
6783 
6784 err_out:
6785 	kfree(data);
6786 	return NULL;
6787 }
6788 
rtl8169_rx_clear(struct rtl8169_private * tp)6789 static void rtl8169_rx_clear(struct rtl8169_private *tp)
6790 {
6791 	unsigned int i;
6792 
6793 	for (i = 0; i < NUM_RX_DESC; i++) {
6794 		if (tp->Rx_databuff[i]) {
6795 			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
6796 					    tp->RxDescArray + i);
6797 		}
6798 	}
6799 }
6800 
/* Set the RingEnd bit so the NIC wraps back to the first descriptor. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
6805 
/*
 * Populate the receive ring: allocate a buffer for every empty slot,
 * then mark the last descriptor as the ring end.
 *
 * Returns 0 on success or -ENOMEM after releasing everything that was
 * allocated (partial fills are rolled back via rtl8169_rx_clear()).
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already has a buffer (e.g. refill after reset). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
6831 
/*
 * Initialize both descriptor rings: reset the ring indexes, clear the
 * tx/rx bookkeeping arrays and fill the rx ring with fresh buffers.
 *
 * Returns 0 on success or a negative errno from rtl8169_rx_fill().
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
6843 
/*
 * Undo the DMA mapping of one tx slot and scrub both the hardware
 * descriptor and the driver-side length so the slot reads as free.
 * Does not free the skb — the caller owns that.
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
6856 
/*
 * Release @n tx ring entries starting at index @start (modulo ring
 * size): unmap each in-use slot and consume its skb, if any.  Slots
 * with a zero length are untouched.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		struct sk_buff *skb;

		if (!tx_skb->len)
			continue;

		skb = tx_skb->skb;
		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* Only the slot holding the last fragment carries the skb. */
		if (skb) {
			dev_consume_skb_any(skb);
			tx_skb->skb = NULL;
		}
	}
}
6879 
/* Drop every pending tx packet and reset the tx ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
6885 
/*
 * Full device reset (run from the driver workqueue): quiesce NAPI and
 * the tx queue, reset the chip, recycle the rx descriptors, drop all
 * pending tx work, then restart the hardware and the queue.  The
 * ordering is deliberate — nothing may touch the rings between the
 * quiesce and the hw reset.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for in-flight softirq handlers before touching the rings. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Give every rx descriptor back to the NIC with a fresh size. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
6908 
/* ndo_tx_timeout handler: defer a full reset to the driver workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6915 
/*
 * Map and queue the paged fragments of @skb into the tx ring, starting
 * one slot past tp->cur_tx (the head slot is filled by the caller).
 * @opts carries the opts1/opts2 descriptor words to apply.
 *
 * Returns the number of fragments queued, or -EIO after unwinding all
 * mappings made so far if a DMA mapping fails.  On success the skb is
 * attached to the last fragment's slot and that descriptor gets
 * LastFrag set.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd is set only on the physically last ring slot. */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* Park the skb on the last fragment so tx completion can
		 * free it exactly once. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
6966 
rtl_test_hw_pad_bug(struct rtl8169_private * tp,struct sk_buff * skb)6967 static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
6968 {
6969 	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
6970 }
6971 
6972 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6973 				      struct net_device *dev);
/* r8169_csum_workaround()
 * The hw limits the transport offset value. When the offset is out of
 * range, calculate the checksum in sw.
 */
static void r8169_csum_workaround(struct rtl8169_private *tp,
				  struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_size) {
		/* TSO frame the hw cannot offload: segment it in software
		 * (with SG/TSO6/IPv6-csum masked off so the segments need
		 * no further offload) and requeue each segment.
		 */
		netdev_features_t features = tp->dev->features;
		struct sk_buff *segs, *nskb;

		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop;

		do {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			rtl8169_start_xmit(nskb, tp->dev);
		} while (segs);

		/* The original skb was fully replaced by its segments. */
		dev_consume_skb_any(skb);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Plain csum-offload frame: compute the checksum in
		 * software, then send it without offload.
		 */
		if (skb_checksum_help(skb) < 0)
			goto drop;

		rtl8169_start_xmit(skb, tp->dev);
	} else {
		struct net_device_stats *stats;

drop:
		stats = &tp->dev->stats;
		stats->tx_dropped++;
		dev_kfree_skb_any(skb);
	}
}
7012 
/* msdn_giant_send_check()
 * According to Microsoft's documentation, the TCP pseudo header excludes
 * the packet length for IPv6 TCP large packets.
 */
msdn_giant_send_check(struct sk_buff * skb)7017 static int msdn_giant_send_check(struct sk_buff *skb)
7018 {
7019 	const struct ipv6hdr *ipv6h;
7020 	struct tcphdr *th;
7021 	int ret;
7022 
7023 	ret = skb_cow_head(skb, 0);
7024 	if (ret)
7025 		return ret;
7026 
7027 	ipv6h = ipv6_hdr(skb);
7028 	th = tcp_hdr(skb);
7029 
7030 	th->check = 0;
7031 	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
7032 
7033 	return ret;
7034 }
7035 
get_protocol(struct sk_buff * skb)7036 static inline __be16 get_protocol(struct sk_buff *skb)
7037 {
7038 	__be16 protocol;
7039 
7040 	if (skb->protocol == htons(ETH_P_8021Q))
7041 		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
7042 	else
7043 		protocol = skb->protocol;
7044 
7045 	return protocol;
7046 }
7047 
rtl8169_tso_csum_v1(struct rtl8169_private * tp,struct sk_buff * skb,u32 * opts)7048 static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
7049 				struct sk_buff *skb, u32 *opts)
7050 {
7051 	u32 mss = skb_shinfo(skb)->gso_size;
7052 
7053 	if (mss) {
7054 		opts[0] |= TD_LSO;
7055 		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
7056 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7057 		const struct iphdr *ip = ip_hdr(skb);
7058 
7059 		if (ip->protocol == IPPROTO_TCP)
7060 			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
7061 		else if (ip->protocol == IPPROTO_UDP)
7062 			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
7063 		else
7064 			WARN_ON_ONCE(1);
7065 	}
7066 
7067 	return true;
7068 }
7069 
/* Fill opts[0]/opts[1] with second-generation TSO/checksum-offload
 * descriptor bits for @skb.  Returns false when the frame cannot be
 * offloaded (out-of-range transport offset, or the IPv6 pseudo-header
 * fixup failed); the caller then falls back to r8169_csum_workaround().
 */
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
				struct sk_buff *skb, u32 *opts)
{
	u32 transport_offset = (u32)skb_transport_offset(skb);
	u32 mss = skb_shinfo(skb)->gso_size;

	if (mss) {
		/* TSO path: the hw encodes the transport header offset in
		 * a limited field, so reject frames it cannot describe.
		 */
		if (transport_offset > GTTCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x for TSO\n",
				   transport_offset);
			return false;
		}

		switch (get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[0] |= TD1_GTSENV4;
			break;

		case htons(ETH_P_IPV6):
			/* IPv6 giant send needs the pseudo-header checksum
			 * rewritten without the length field first.
			 */
			if (msdn_giant_send_check(skb))
				return false;

			opts[0] |= TD1_GTSENV6;
			break;

		default:
			WARN_ON_ONCE(1);
			break;
		}

		opts[0] |= transport_offset << GTTCPHO_SHIFT;
		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_protocol;

		/* Short frame on a chip with the hw padding bug: do the
		 * checksum and padding in software, skipping hw offload.
		 */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !(skb_checksum_help(skb) || eth_skb_pad(skb));

		if (transport_offset > TCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x\n",
				   transport_offset);
			return false;
		}

		switch (get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[1] |= TD1_IPv4_CS;
			ip_protocol = ip_hdr(skb)->protocol;
			break;

		case htons(ETH_P_IPV6):
			opts[1] |= TD1_IPv6_CS;
			ip_protocol = ipv6_hdr(skb)->nexthdr;
			break;

		default:
			ip_protocol = IPPROTO_RAW;
			break;
		}

		if (ip_protocol == IPPROTO_TCP)
			opts[1] |= TD1_TCP_CS;
		else if (ip_protocol == IPPROTO_UDP)
			opts[1] |= TD1_UDP_CS;
		else
			WARN_ON_ONCE(1);

		opts[1] |= transport_offset << TCPHO_SHIFT;
	} else {
		/* No offload requested; only the padding bug needs care. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !eth_skb_pad(skb);
	}

	return true;
}
7147 
/* ndo_start_xmit handler: map the skb head and fragments into Tx
 * descriptors, hand ownership to the chip (DescOwn), and kick TxPoll.
 * The memory-barrier choreography against rtl_tx() is documented inline;
 * do not reorder these statements.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* Head descriptor still owned by the chip: ring inconsistency. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
	opts[0] = DescOwn;

	/* Offload setup failed: fall back to software csum/segmentation. */
	if (!tp->tso_csum(tp, skb, opts)) {
		r8169_csum_workaround(tp, skb);
		return NETDEV_TX_OK;
	}

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		/* Single-descriptor frame: this slot owns the skb. */
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	/* Force all memory writes to complete before notifying device */
	wmb();

	tp->cur_tx += frags + 1;

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
7250 
/* Handle a SYSErr (PCI error) event: log and clear the PCI status bits,
 * optionally disable PCI DAC, reset the chip and schedule a full
 * datapath reset from the workqueue.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the latched error bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
7297 
/* Tx completion path (NAPI context): reclaim descriptors the chip has
 * released (DescOwn cleared), update stats, and possibly wake the queue.
 * The barrier pairing with rtl8169_start_xmit() is documented inline.
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Tx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			/* The skb is attached to the last fragment's slot. */
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_consume_skb_any(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
7362 
/* Non-zero when the Rx descriptor status describes only part of a frame:
 * a complete frame carries both FirstFrag and LastFrag.
 */
static inline int rtl8169_fragmented_frame(u32 status)
{
	u32 whole = FirstFrag | LastFrag;

	return (status & whole) != whole;
}
7367 
/* Translate the Rx descriptor's hardware checksum verdict into the skb:
 * a TCP/UDP frame with no checksum-failure bit is CHECKSUM_UNNECESSARY.
 */
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
	u32 proto = opts1 & RxProtoMask;
	bool good = (proto == RxProtoTCP && !(opts1 & TCPFail)) ||
		    (proto == RxProtoUDP && !(opts1 & UDPFail));

	if (good)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
7378 
/* Copy a received packet out of the DMA buffer into a fresh skb so the
 * DMA buffer can be handed straight back to the chip.  Returns NULL on
 * allocation failure (the caller drops the packet).
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	/* Give the CPU a coherent view of the DMA buffer before copying. */
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = napi_alloc_skb(&tp->napi, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	/* Return the buffer to the device whether or not the copy happened. */
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
7397 
/* Rx path (NAPI context): walk the Rx ring up to @budget entries,
 * copy completed packets into skbs, push them through GRO and hand the
 * descriptors back to the chip.  Returns the number of entries consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Rx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			/* FIFO overflow: schedule a full reset to recover. */
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* With RXALL, still deliver runt/CRC-error frames. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			if (skb->pkt_type == PACKET_MULTICAST)
				dev->stats.multicast++;

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		/* Re-arm the entry: give the buffer back to the ASIC. */
		desc->opts2 = 0;
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
7492 
rtl8169_interrupt(int irq,void * dev_instance)7493 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
7494 {
7495 	struct net_device *dev = dev_instance;
7496 	struct rtl8169_private *tp = netdev_priv(dev);
7497 	int handled = 0;
7498 	u16 status;
7499 
7500 	status = rtl_get_events(tp);
7501 	if (status && status != 0xffff) {
7502 		status &= RTL_EVENT_NAPI | tp->event_slow;
7503 		if (status) {
7504 			handled = 1;
7505 
7506 			rtl_irq_disable(tp);
7507 			napi_schedule(&tp->napi);
7508 		}
7509 	}
7510 	return IRQ_RETVAL(handled);
7511 }
7512 
7513 /*
7514  * Workqueue context.
7515  */
/* Handle the "slow" (rare) events deferred from rtl8169_poll():
 * Rx FIFO overflow, PCI system error and link change.  Re-enables the
 * full interrupt mask when done.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
7544 
/* Workqueue entry point: dispatch each pending task flag (slow events,
 * reset, PHY work) to its handler, under the driver work lock.  Nothing
 * runs once the device is down or tasks are disabled.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		/* Atomically consume the flag so re-arming is race-free. */
		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
7578 
/* NAPI poll handler: service Rx up to @budget, reclaim Tx, and defer
 * slow events to the workqueue (leaving them masked until handled).
 * Interrupts are re-enabled only when the budget was not exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done;
	u16 status;

	status = rtl_get_events(tp);
	/* Ack fast events only; slow ones are acked by rtl_slow_event_work. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	work_done = rtl_rx(dev, tp, (u32) budget);

	rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		/* Keep slow events masked until the workqueue handles them. */
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
7609 
/* Fold the chip's RxMissed counter into dev->stats and clear it.
 * Only the early (<= VER_06) chips expose this register.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	/* Only the low 24 bits hold the counter. */
	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
7620 
/* Quiesce and tear down the datapath: stop the timer, NAPI and queue,
 * reset the chip, drain both rings and power the PLL down.  Statement
 * order matters; see the inline notes.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
7648 
/* ndo_stop handler: snapshot the HW counters, bring the datapath down,
 * cancel deferred work and free the IRQ and descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Keep the device resumed for the whole teardown. */
	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	/* No task flag survives, so the work item exits immediately. */
	cancel_work_sync(&tp->wk.work);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
7681 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: run the interrupt handler synchronously so
 * netconsole & co. can make progress with interrupts unavailable.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
7690 
/* ndo_open handler: allocate the descriptor rings, request firmware and
 * the IRQ, bring the PHY and MAC up and start the queue.  Failures
 * unwind through the goto ladder in reverse acquisition order.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	/* Sample the HW tally counters so get_stats64 can offset them. */
	if (!rtl8169_init_counter_offsets(dev))
		netif_warn(tp, hw, dev, "counter reset/update failed\n");

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
7773 
/* ndo_get_stats64 handler: merge driver-maintained per-CPU-style SW
 * counters (u64_stats sync'd) with dev->stats and the chip's HW tally
 * counters, the latter corrected by the offsets sampled at open time.
 */
static void
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	struct rtl8169_counters *counters = tp->counters;
	unsigned int start;

	pm_runtime_get_noresume(&pdev->dev);

	/* Only touch registers when the device is actually powered. */
	if (netif_running(dev) && pm_runtime_active(&pdev->dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;
	stats->multicast	= dev->stats.multicast;

	/*
	 * Fetch additional counter values missing in stats collected by driver
	 * from tally counters.
	 */
	if (pm_runtime_active(&pdev->dev))
		rtl8169_update_counters(dev);

	/*
	 * Subtract values fetched during initialization.
	 * See rtl8169_init_counter_offsets for a description why we do that.
	 */
	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
		le64_to_cpu(tp->tc_offset.tx_errors);
	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
		le32_to_cpu(tp->tc_offset.tx_multi_collision);
	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
		le16_to_cpu(tp->tc_offset.tx_aborted);

	pm_runtime_put_noidle(&pdev->dev);
}
7829 
/* Common suspend path (system sleep, runtime suspend and shutdown):
 * detach the netdev, quiesce NAPI and pending work, power the PLL down.
 * No-op when the interface is not up.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
7849 
7850 #ifdef CONFIG_PM
7851 
/* System sleep (suspend/freeze/poweroff) callback. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
7861 
/* Common resume tail: reattach the netdev, power the PLL up, re-enable
 * NAPI/tasks, and let the workqueue perform the actual HW restart via
 * rtl_reset_work().
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* Defer the datapath restart to rtl_reset_work(). */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
7877 
/* System resume callback: reprogram the PHY, then restore the datapath
 * if the interface was up.
 */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
7891 
/* Runtime suspend: save the current WoL options and arm wake-on-any,
 * then quiesce the datapath and snapshot the counters.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Rings not allocated: the interface is down, nothing to do. */
	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	/* Update counters before going runtime suspend */
	rtl8169_rx_missed(dev, tp->mmio_addr);
	rtl8169_update_counters(dev);

	return 0;
}
7914 
/* Runtime resume: restore the MAC address and the WoL options saved by
 * rtl8169_runtime_suspend(), reprogram the PHY and restart the datapath.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	/* Restore the unicast MAC filter unconditionally. */
	rtl_rar_set(tp, dev->dev_addr);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
7936 
rtl8169_runtime_idle(struct device * device)7937 static int rtl8169_runtime_idle(struct device *device)
7938 {
7939 	struct pci_dev *pdev = to_pci_dev(device);
7940 	struct net_device *dev = pci_get_drvdata(pdev);
7941 	struct rtl8169_private *tp = netdev_priv(dev);
7942 
7943 	return tp->TxDescArray ? -EBUSY : 0;
7944 }
7945 
/* Power-management callbacks; RTL8169_PM_OPS is NULL without CONFIG_PM. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
7965 
rtl_wol_shutdown_quirk(struct rtl8169_private * tp)7966 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
7967 {
7968 	void __iomem *ioaddr = tp->mmio_addr;
7969 
7970 	/* WoL fails with 8168b when the receiver is disabled. */
7971 	switch (tp->mac_version) {
7972 	case RTL_GIGA_MAC_VER_11:
7973 	case RTL_GIGA_MAC_VER_12:
7974 	case RTL_GIGA_MAC_VER_17:
7975 		pci_clear_master(tp->pci_dev);
7976 
7977 		RTL_W8(ChipCmd, CmdRxEnb);
7978 		/* PCI commit */
7979 		RTL_R8(ChipCmd);
7980 		break;
7981 	default:
7982 		break;
7983 	}
7984 }
7985 
/*
 * PCI shutdown hook (reboot/poweroff).  Quiesces the NIC and, when the
 * system is actually powering off, arms Wake-on-LAN (if configured) and
 * drops the device into D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	/* Make sure the chip is awake before touching it. */
	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		/* Apply WoL quirks only if some wake option is armed. */
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	/* Balance the get_sync above without re-suspending. */
	pm_runtime_put_noidle(d);
}
8013 
/*
 * PCI remove hook: tear down everything rtl_init_one() set up, in
 * reverse order.  The DASH chip list here mirrors the one used when
 * starting the DASH firmware in rtl_init_one().
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		rtl8168_driver_stop(tp);
	}

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	/* Free the DMA-coherent hardware statistics block. */
	dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
			  tp->counters, tp->counters_phys_addr);

	rtl_release_firmware(tp);

	/* Balance the pm_runtime_put_noidle() done at probe time. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
8047 
/* Net device callbacks exposed to the networking core. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
8066 
/*
 * Per-family configuration, indexed by the driver_data field of the PCI
 * device ID table (RTL_CFG_0 = 8169, RTL_CFG_1 = 8168, RTL_CFG_2 = 8101).
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);	/* family hw bring-up */
	unsigned int region;			/* PCI BAR holding the registers */
	unsigned int align;			/* RX buffer alignment */
	u16 event_slow;				/* slow-path interrupt events */
	unsigned features;			/* RTL_FEATURE_* capabilities */
	u8 default_ver;				/* fallback mac_version */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
8101 
8102 /* Cfg9346_Unlock assumed. */
rtl_try_msi(struct rtl8169_private * tp,const struct rtl_cfg_info * cfg)8103 static unsigned rtl_try_msi(struct rtl8169_private *tp,
8104 			    const struct rtl_cfg_info *cfg)
8105 {
8106 	void __iomem *ioaddr = tp->mmio_addr;
8107 	unsigned msi = 0;
8108 	u8 cfg2;
8109 
8110 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
8111 	if (cfg->features & RTL_FEATURE_MSI) {
8112 		if (pci_enable_msi(tp->pci_dev)) {
8113 			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
8114 		} else {
8115 			cfg2 |= MSIEnable;
8116 			msi = RTL_FEATURE_MSI;
8117 		}
8118 	}
8119 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
8120 		RTL_W8(Config2, cfg2);
8121 	return msi;
8122 }
8123 
DECLARE_RTL_COND(rtl_link_list_ready_cond)8124 DECLARE_RTL_COND(rtl_link_list_ready_cond)
8125 {
8126 	void __iomem *ioaddr = tp->mmio_addr;
8127 
8128 	return RTL_R8(MCU) & LINK_LIST_RDY;
8129 }
8130 
DECLARE_RTL_COND(rtl_rxtx_empty_cond)8131 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
8132 {
8133 	void __iomem *ioaddr = tp->mmio_addr;
8134 
8135 	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
8136 }
8137 
/*
 * One-time hardware init for the 8168g family.  The sequence (gate RXDV,
 * wait for TX config and RX/TX FIFOs to drain, stop the datapath, clear
 * NOW_IS_OOB, then toggle bits 14/15 of mac-ocp register 0xe8de with a
 * link-list-ready wait after each) is order-sensitive; each wait bails
 * out early on timeout, leaving the remaining steps undone.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	/* Gate RXDV so no new traffic enters while we reconfigure. */
	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Clear then set handshake bits in ocp reg 0xe8de; the exact bit
	 * semantics are undocumented here - vendor-prescribed sequence. */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
8171 
/* 8168EP init: stop the CMAC first, then run the common 8168g init. */
static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	rtl_hw_init_8168g(tp);
}
8177 
rtl_hw_initialize(struct rtl8169_private * tp)8178 static void rtl_hw_initialize(struct rtl8169_private *tp)
8179 {
8180 	switch (tp->mac_version) {
8181 	case RTL_GIGA_MAC_VER_40:
8182 	case RTL_GIGA_MAC_VER_41:
8183 	case RTL_GIGA_MAC_VER_42:
8184 	case RTL_GIGA_MAC_VER_43:
8185 	case RTL_GIGA_MAC_VER_44:
8186 	case RTL_GIGA_MAC_VER_45:
8187 	case RTL_GIGA_MAC_VER_46:
8188 	case RTL_GIGA_MAC_VER_47:
8189 	case RTL_GIGA_MAC_VER_48:
8190 		rtl_hw_init_8168g(tp);
8191 		break;
8192 	case RTL_GIGA_MAC_VER_49:
8193 	case RTL_GIGA_MAC_VER_50:
8194 	case RTL_GIGA_MAC_VER_51:
8195 		rtl_hw_init_8168ep(tp);
8196 		break;
8197 	default:
8198 		break;
8199 	}
8200 }
8201 
/*
 * PCI probe.  Allocates the netdev, maps the register BAR, identifies the
 * chip, configures DMA masks / WoL / MSI / netdev features and registers
 * the interface.  Errors unwind through the numbered goto ladder at the
 * bottom, in strict reverse order of acquisition.
 */
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Wire up the generic MII helper (5-bit phy and register ids). */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* MWI is best-effort; failure is only informational. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	tp->cp_cmd = 0;

	/* 64-bit DMA: honoured when use_dac=1, or auto-enabled (use_dac=-1)
	 * on PCIe chips from VER_18 up; otherwise fall back to 32-bit. */
	if ((sizeof(dma_addr_t) > 4) &&
	    (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
			      tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {

		/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
		if (!pci_is_pcie(pdev))
			tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_unmap_4;
		}
	}

	rtl_init_rxcfg(tp);

	rtl_irq_disable(tp);

	rtl_hw_initialize(tp);

	rtl_hw_reset(tp);

	/* Clear any event left pending from before the reset. */
	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/* Install the chip-version-specific operation tables. */
	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);
	rtl_init_csi_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Probe the WoL capabilities with the config registers unlocked. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		/* Newer chips report magic-packet capability via ERI 0xdc. */
		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
			tp->features |= RTL_FEATURE_WOL;
		if ((RTL_R8(Config3) & LinkUp) != 0)
			tp->features |= RTL_FEATURE_WOL;
		break;
	default:
		if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
			tp->features |= RTL_FEATURE_WOL;
		break;
	}
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select the TBI (fiber) or XMII (copper) method table. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);
	u64_stats_init(&tp->rx_stats.syncp);
	u64_stats_init(&tp->tx_stats.syncp);

	/* Get MAC address */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_36 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_37 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_40 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_41 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_42 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_43 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_44 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_45 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_46 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_47 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_48 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_51) {
		u16 mac_addr[3];

		/* These chips keep the MAC in ERI regs 0xe0/0xe4; copy it
		 * into the MAC0 registers if it looks valid. */
		*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
		*(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);

		if (is_valid_ether_addr((u8 *)mac_addr))
			rtl_rar_set(tp, (u8 *)mac_addr);
	}
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);

	dev->ethtool_ops = &rtl8169_ethtool_ops;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	tp->cp_cmd |= RxChkSum | RxVlan;

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* Disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	/* Pick the TX checksum/TSO helper matching the descriptor format. */
	if (tp->txd_version == RTL_TD_0)
		tp->tso_csum = rtl8169_tso_csum_v1;
	else if (tp->txd_version == RTL_TD_1) {
		tp->tso_csum = rtl8169_tso_csum_v2;
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
	} else
		WARN_ON_ONCE(1);

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	/* MTU range: 60 - hw-specific max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	/* VER_01 cannot report RX ring overflow in opts1; mask accordingly. */
	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	/* DMA-coherent block the chip dumps hardware counters into. */
	tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
					   &tp->counters_phys_addr, GFP_KERNEL);
	if (!tp->counters) {
		rc = -ENOMEM;
		goto err_out_msi_5;
	}

	pci_set_drvdata(pdev, dev);

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_cnt_6;

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	/* DASH chip list must stay in sync with rtl_remove_one(). */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		rtl8168_driver_start(tp);
	}

	/* Allow runtime suspend; matched by get_noresume in remove. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

err_out_cnt_6:
	dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
			  tp->counters_phys_addr);
err_out_msi_5:
	netif_napi_del(&tp->napi);
	rtl_disable_msi(pdev, tp);
err_out_unmap_4:
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
8527 
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= rtl_remove_one,
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};

module_pci_driver(rtl8169_pci_driver);
8538